1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150#include <linux/module.h>
151#include <linux/moduleparam.h>
152#include <linux/kernel.h>
153#include <linux/types.h>
154#include <linux/sched.h>
155#include <linux/slab.h>
156#include <linux/delay.h>
157#include <linux/init.h>
158#include <linux/pci.h>
159#include <linux/dma-mapping.h>
160#include <linux/dmapool.h>
161#include <linux/netdevice.h>
162#include <linux/etherdevice.h>
163#include <linux/mii.h>
164#include <linux/if_vlan.h>
165#include <linux/skbuff.h>
166#include <linux/ethtool.h>
167#include <linux/string.h>
168#include <linux/firmware.h>
169#include <asm/unaligned.h>
170
171
/* Driver identity strings used in logs, ethtool output and MODULE_* tags. */
#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
#define DRV_VERSION "3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
#define PFX DRV_NAME ": "

/* Link watchdog interval and NAPI poll budget. */
#define E100_WATCHDOG_PERIOD (2 * HZ)
#define E100_NAPI_WEIGHT 16

/* Microcode images loaded via the firmware loader, keyed by MAC revision. */
#define FIRMWARE_D101M "e100/d101m_ucode.bin"
#define FIRMWARE_D101S "e100/d101s_ucode.bin"
#define FIRMWARE_D102E "e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

/* Module parameters: message verbosity, EEPROM checksum override, and
 * forcing port I/O instead of MMIO register access. */
static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
/* Conditional printk gated on the per-nic msg_enable bitmap; expects a
 * local variable named 'nic' to be in scope at every call site. */
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__func__ , ## args))
207
/* PCI match entry for an 8255x part; the driver_data field carries a
 * nonzero value for ICH-integrated variants (consumed as the 'ich' flag). */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
/* Supported 8255x device IDs; second argument is the ICH generation
 * (0 = discrete NIC, not ICH). */
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);
257
/* MAC revisions; the numeric values come from the PCI revision register
 * (see e100_get_defaults, which assigns pdev->revision directly). */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

/* Known PHY IDs as read back from MII_PHYSID2:MII_PHYSID1
 * (see e100_phy_init). */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};
286
287
/* Control/Status Register layout, mapped over the device's register
 * window (accessed via ioread/iowrite throughout this driver). */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

/* Receive Unit status bits within scb.status. */
enum scb_status {
	rus_no_res = 0x08,
	rus_ready  = 0x10,
	rus_mask   = 0x3C,
};

/* Software-tracked Receive Unit state. */
enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};

/* Interrupt cause bits; writing them back to scb.stat_ack acknowledges. */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

/* Interrupt mask control, written to scb.cmd_hi. */
enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

/* Command Unit / Receive Unit opcodes, written to scb.cmd_lo. */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

/* Completion codes the device writes after a stats dump. */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

/* Values written to the PORT register to reset or self-test the device. */
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

/* Bit-banged serial EEPROM interface lines in eeprom_ctrl_lo. */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* serial clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (to EEPROM) */
	eedo = 0x08,	/* data out (from EEPROM) */
};

/* MDI control register opcode/status bits. */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

/* Serial EEPROM opcodes (see e100_eeprom_read/write). */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

/* Word offsets of interest inside the EEPROM image. */
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

/* PHY interface type encoded in the eeprom_phy_iface word. */
enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

/* Command block completion status bits (written back by the device). */
enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/* Command block opcodes and control bits (cb_s = suspend, cb_el = end
 * of list, cb_i = interrupt on completion). */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};
429
/* Receive Frame Descriptor as laid out in device-shared memory;
 * all fields are little-endian on the wire. */
struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

/* Host-side bookkeeping for one Rx ring slot. */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

/* The X() macro orders two bitfields so the struct config layout below
 * matches the device's expected byte layout on both bitfield endiannesses. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
/* Device configure-command block (cb_config); one bitfield-packed byte
 * per line.  Field values are programmed in e100_configure(). */
struct config {
	u8 X(byte_count:6, pad0:2);
	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
	u8 adaptive_ifs;
	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
	u8 X(rx_dma_max_count:7, pad4:1);
	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
	u8 X(linear_priority:3, pad11:5);
	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
	u8 ip_addr_lo;
	u8 ip_addr_hi;
	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
	u8 fc_delay_lo;
	u8 fc_delay_hi;
	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
/* Multicast-setup command block (cb_multi): a count followed by the
 * packed list of 6-byte addresses. */
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2];
};
494
495
#define UCODE_SIZE			134
/* Command Block: the hardware descriptor (status..u) followed by
 * host-only bookkeeping (next/prev/dma_addr/skb).  The union selects
 * the payload for each cb_command opcode. */
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			/* NOTE(review): tbd_array/tcb_byte_count/eol are
			 * device-visible little-endian words like the rest of
			 * the descriptor, but lack __le annotations here —
			 * confirm against sparse before changing. */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

/* Loopback mode selected via the config block's 2-bit loopback field. */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

/* Statistics dump area filled by the device on cuc_dump_reset;
 * 'complete' is written last with a cuc_dump completion code. */
struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

/* DMA-coherent scratch memory shared with the device: self-test result
 * area, statistics, and a register dump buffer. */
struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

/* min/max bounds and current setting for a tunable ring size. */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};
559
/* Per-adapter state; hot fields are grouped and cacheline-aligned. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	/* Per-PHY MDIO access strategy, selected during PHY init. */
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	/* Rx ring: array of slots plus fill/clean cursors. */
	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;			/* template for new RFDs */
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;			/* serializes SCB accesses */
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;		/* cuc_start or cuc_resume */
	unsigned int cbs_avail;
	struct napi_struct napi;
	/* CB ring: use/send/clean cursors over the circular list. */
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;			/* template Tx cb->command */
	/* End: frequently used values */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;			/* DMA-coherent shared area */
	dma_addr_t dma_addr;

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	/* Software-accumulated Tx counters. */
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	/* Software-accumulated Rx counters. */
	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;				/* EEPROM word count */
	__le16 eeprom[256];
	spinlock_t mdio_lock;			/* serializes MDI accesses */
};
628
/* Force posted PCI writes to reach the device by issuing a read;
 * the value read back is deliberately discarded. */
static inline void e100_write_flush(struct nic *nic)
{
	(void)ioread8(&nic->csr->scb.status);
}
635
/* Unmask device interrupts; cmd_lock serializes SCB register access. */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);	/* push the unmask to the device now */
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
645
/* Mask all device interrupts; cmd_lock serializes SCB register access. */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);	/* push the mask to the device now */
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
655
/* Reset the adapter: a selective reset first, then a full software
 * reset, each followed by a flush and a short settle delay. */
static void e100_hw_reset(struct nic *nic)
{
	/* Selective reset quiesces the CU/RU before the full reset. */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset. */
	e100_disable_irq(nic);
}
670
/* Run the adapter's built-in self-test.
 *
 * The device DMAs its result into mem->selftest: it sets 'signature'
 * nonzero and writes 0 to 'result' on success.  Returns 0 on pass,
 * -ETIMEDOUT on failure or if the device never responded.
 */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Pre-load sentinel values so we can tell whether the device
	 * actually wrote anything back. */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	/* PORT selftest command takes the DMA address in its upper bits. */
	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);

	/* Give the device time to run the test and DMA the result. */
	msleep(10);

	/* Self-test acts like a reset; interrupts come back unmasked. */
	e100_disable_irq(nic);

	/* result must be 0 and signature must be nonzero on success. */
	if (nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
702
703static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
704{
705 u32 cmd_addr_data[3];
706 u8 ctrl;
707 int i, j;
708
709
710 cmd_addr_data[0] = op_ewen << (addr_len - 2);
711 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
712 le16_to_cpu(data);
713 cmd_addr_data[2] = op_ewds << (addr_len - 2);
714
715
716 for (j = 0; j < 3; j++) {
717
718
719 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
720 e100_write_flush(nic); udelay(4);
721
722 for (i = 31; i >= 0; i--) {
723 ctrl = (cmd_addr_data[j] & (1 << i)) ?
724 eecs | eedi : eecs;
725 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
726 e100_write_flush(nic); udelay(4);
727
728 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
729 e100_write_flush(nic); udelay(4);
730 }
731
732 msleep(10);
733
734
735 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
736 e100_write_flush(nic); udelay(4);
737 }
738};
739
740
741static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
742{
743 u32 cmd_addr_data;
744 u16 data = 0;
745 u8 ctrl;
746 int i;
747
748 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
749
750
751 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
752 e100_write_flush(nic); udelay(4);
753
754
755 for (i = 31; i >= 0; i--) {
756 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
757 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
758 e100_write_flush(nic); udelay(4);
759
760 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
761 e100_write_flush(nic); udelay(4);
762
763
764
765 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
766 if (!(ctrl & eedo) && i > 16) {
767 *addr_len -= (i - 16);
768 i = 17;
769 }
770
771 data = (data << 1) | (ctrl & eedo ? 1 : 0);
772 }
773
774
775 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
776 e100_write_flush(nic); udelay(4);
777
778 return cpu_to_le16(data);
779};
780
781
/* Load the whole EEPROM image into nic->eeprom and verify its checksum.
 * Returns 0 on success, -EAGAIN on a bad checksum (unless overridden by
 * the eeprom_bad_csum_allow module parameter). */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* A dummy read corrects addr_len to the part's real address
	 * width (e100_eeprom_read adjusts it in place). */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		/* Sum every word except the stored checksum itself. */
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The last word holds 0xBABA minus the sum of all other words. */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
806
807
/* Write back 'count' words of nic->eeprom starting at 'start', then
 * recompute and store the checksum word.  Returns 0 on success or
 * -EINVAL if the range would touch the checksum word or run past the
 * end of the part. */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* A dummy read corrects addr_len to the part's real address
	 * width (e100_eeprom_read adjusts it in place). */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The last word holds 0xBABA minus the sum of all other words;
	 * refresh it to cover the new contents. */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
832
#define E100_WAIT_SCB_TIMEOUT	20000	/* we might have to wait 100ms+ */
#define E100_WAIT_SCB_FAST	20	/* delay like the old code */
/* Issue one SCB command to the device.
 *
 * Spins (briefly without delay, then with udelay) until the device has
 * consumed the previous command (scb.cmd_lo reads 0), optionally loads
 * the general pointer, then writes the opcode.  Returns 0 on success or
 * -EAGAIN if the SCB never became free.  Serialized by cmd_lock.
 */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB command byte clears. */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* Only start sleeping once the fast-path spins run out. */
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* cuc_resume continues from the current CB; every other command
	 * needs the general pointer loaded first. */
	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
865
/* Queue one command block and kick the Command Unit.
 *
 * Takes the next free CB, lets cb_prepare fill it in, then marks it
 * suspend (cb_s) and clears suspend on its predecessor so the CU chains
 * into it.  Returns 0 on success, -ENOMEM if no CB is free, or -ENOSPC
 * if this consumed the last free CB (command still queued).
 */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	/* Ring is now full; report it, but still issue the command. */
	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important: set this CB's suspend bit before clearing
	 * the previous one's, so the CU never runs off the end. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	/* Push all not-yet-issued CBs to the device. */
	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Couldn't kick the CU now; the CB stays queued
			 * and will be retried on the next call.  If the
			 * ring is also full, nudge the recovery path. */
			if (err == -ENOSPC) {
				/* Recover from a stalled, full ring. */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			/* After the first start, resume is sufficient. */
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
920
921static int mdio_read(struct net_device *netdev, int addr, int reg)
922{
923 struct nic *nic = netdev_priv(netdev);
924 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
925}
926
927static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
928{
929 struct nic *nic = netdev_priv(netdev);
930
931 nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
932}
933
934
935static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
936{
937 u32 data_out = 0;
938 unsigned int i;
939 unsigned long flags;
940
941
942
943
944
945
946
947
948 spin_lock_irqsave(&nic->mdio_lock, flags);
949 for (i = 100; i; --i) {
950 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
951 break;
952 udelay(20);
953 }
954 if (unlikely(!i)) {
955 printk("e100.mdio_ctrl(%s) won't go Ready\n",
956 nic->netdev->name );
957 spin_unlock_irqrestore(&nic->mdio_lock, flags);
958 return 0;
959 }
960 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
961
962 for (i = 0; i < 100; i++) {
963 udelay(20);
964 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
965 break;
966 }
967 spin_unlock_irqrestore(&nic->mdio_lock, flags);
968 DPRINTK(HW, DEBUG,
969 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
970 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
971 return (u16)data_out;
972}
973
974
975static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
976 u32 addr,
977 u32 dir,
978 u32 reg,
979 u16 data)
980{
981 if ((reg == MII_BMCR) && (dir == mdi_write)) {
982 if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
983 u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
984 MII_ADVERTISE);
985
986
987
988
989
990 if (advert & ADVERTISE_100FULL)
991 data |= BMCR_SPEED100 | BMCR_FULLDPLX;
992 else if (advert & ADVERTISE_100HALF)
993 data |= BMCR_SPEED100;
994 }
995 }
996 return mdio_ctrl_hw(nic, addr, dir, reg, data);
997}
998
999
1000
1001
1002
1003
1004
1005static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
1006 u32 addr,
1007 u32 dir,
1008 u32 reg,
1009 u16 data)
1010{
1011
1012
1013
1014
1015 if (dir == mdi_read) {
1016 switch (reg) {
1017 case MII_BMCR:
1018
1019 return BMCR_ANENABLE |
1020 BMCR_FULLDPLX;
1021 case MII_BMSR:
1022 return BMSR_LSTATUS |
1023 BMSR_ANEGCAPABLE |
1024 BMSR_10FULL;
1025 case MII_ADVERTISE:
1026
1027 return ADVERTISE_10HALF |
1028 ADVERTISE_10FULL;
1029 default:
1030 DPRINTK(HW, DEBUG,
1031 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1032 dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
1033 return 0xFFFF;
1034 }
1035 } else {
1036 switch (reg) {
1037 default:
1038 DPRINTK(HW, DEBUG,
1039 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1040 dir == mdi_read ? "READ" : "WRITE", addr, reg, data);
1041 return 0xFFFF;
1042 }
1043 }
1044}
1045static inline int e100_phy_supports_mii(struct nic *nic)
1046{
1047
1048
1049
1050 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
1051}
1052
1053static void e100_get_defaults(struct nic *nic)
1054{
1055 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
1056 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
1057
1058
1059 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
1060 if (nic->mac == mac_unknown)
1061 nic->mac = mac_82557_D100_A;
1062
1063 nic->params.rfds = rfds;
1064 nic->params.cbs = cbs;
1065
1066
1067 nic->tx_threshold = 0xE0;
1068
1069
1070 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
1071 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
1072
1073
1074 nic->blank_rfd.command = 0;
1075 nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
1076 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
1077
1078
1079 nic->mii.phy_id_mask = 0x1F;
1080 nic->mii.reg_num_mask = 0x1F;
1081 nic->mii.dev = nic->netdev;
1082 nic->mii.mdio_read = mdio_read;
1083 nic->mii.mdio_write = mdio_write;
1084}
1085
/* Build the cb_config command block that programs the device's
 * operating parameters.  The field values are device-defined; per-MAC
 * and per-mode adjustments are applied after the common baseline. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	/* Common baseline configuration. */
	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=save */
	config->tx_underrun_retry = 0x3;	/* # retries for underrun */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* 82558 and later support extended features. */
	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
/* CPUSaver microcode tunables patched into the ucode image:
 * bundle small frames, max frames per interrupt bundle, and the
 * interrupt delay in device time units. */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536

/* Fetch the CPUSaver microcode image for this MAC revision, if any.
 * Returns NULL when no ucode is needed (ICH parts and unsupported
 * revisions), an ERR_PTR on load/validation failure, or the firmware. */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw;
	u8 timer, bundle, min_size;
	int err;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Select the image by MAC revision; others run without ucode. */
	if (nic->mac == mac_82559_D101M)
		fw_name = FIRMWARE_D101M;
	else if (nic->mac == mac_82559_D101S)
		fw_name = FIRMWARE_D101S;
	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
		fw_name = FIRMWARE_D102E;
	else /* No ucode on other devices */
		return NULL;

	err = request_firmware(&fw, fw_name, &nic->pdev->dev);
	if (err) {
		DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
			fw_name, err);
		return ERR_PTR(err);
	}

	/* Image layout: UCODE_SIZE dwords of ucode followed by three
	 * one-byte patch offsets (timer, bundle, min_size). */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n",
			fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* The offsets index into the ucode array; reject bogus ones. */
	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		DPRINTK(PROBE, ERR,
			"\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	return fw;
}
1277
/* cb_prepare callback that builds the cb_ucode command block.
 * NOTE: the firmware pointer is smuggled in through the skb argument
 * (see the cast below and the e100_exec_cb call in
 * e100_load_ucode_wait). */
static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we don't want the cleanup path to try
	 * to free it. */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Patch the low 16 bits of the three tunable words with the
	 * CPUSaver parameters (INTDELAY, BUNDLEMAX, BUNDLESMALL). */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}
1306
/* Load the CPUSaver microcode and wait for the device to accept it.
 * Returns 0 on success (including "no ucode needed"), a negative errno
 * on firmware or load failure. */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);

	/* If it's NULL, then no ucode is required and PTR_ERR(NULL)
	 * is 0, so this returns success. */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* Poll the CB status word the device writes back via DMA;
	 * give up after counter * 10ms. */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		DPRINTK(PROBE,ERR, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
1345
1346static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1347 struct sk_buff *skb)
1348{
1349 cb->command = cpu_to_le16(cb_iaaddr);
1350 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1351}
1352
/* cb_prepare callback: build a cb_dump command block pointing the
 * device at the dump_buf area of the shared DMA memory. */
static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}
1359
1360static int e100_phy_check_without_mii(struct nic *nic)
1361{
1362 u8 phy_type;
1363 int without_mii;
1364
1365 phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
1366
1367 switch (phy_type) {
1368 case NoSuchPhy:
1369 case I82503:
1370 case S80C24:
1371
1372
1373
1374
1375
1376
1377 DPRINTK(PROBE, INFO,
1378 "found MII-less i82503 or 80c24 or other PHY\n");
1379
1380 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1381 nic->mii.phy_id = 0;
1382
1383
1384
1385
1386
1387 without_mii = 1;
1388 break;
1389 default:
1390 without_mii = 0;
1391 break;
1392 }
1393 return without_mii;
1394}
1395
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
/* Locate and initialize the PHY: scan the MII bus, read the PHY ID,
 * isolate unused PHYs, and apply per-PHY quirks.  Returns 0 on success
 * (including the MII-less fallback) or -EAGAIN if no PHY responds. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31};
	 * a responding PHY shows something other than all-ones BMCR or
	 * all-zero BMCR+BMSR (BMSR is read twice because link status
	 * latches low). */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be
		 * some 80c24 or i82503 (MII-less) part. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simulated PHY ok */
		else {
			/* for unknown cases log a fatal error */
			DPRINTK(HW, ERR,
				"Failed to locate any known PHY, aborting.\n");
			return -EAGAIN;
		}
	} else
		DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Isolate all the PHY ids, except the selected one. */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/* NOTE(review): this reuses 'bmcr' from the loop above, which
	 * was only refreshed when addr == phy_id; confirm the intended
	 * un-isolate value for the 82552 case. */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys congestion-control quirk. */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* Use the special MDIO strategy for this PHY. */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Advertise flow-control support. */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset the PHY to pick up the new advertisement. */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
1491
1492static int e100_hw_init(struct nic *nic)
1493{
1494 int err;
1495
1496 e100_hw_reset(nic);
1497
1498 DPRINTK(HW, ERR, "e100_hw_init\n");
1499 if (!in_interrupt() && (err = e100_self_test(nic)))
1500 return err;
1501
1502 if ((err = e100_phy_init(nic)))
1503 return err;
1504 if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
1505 return err;
1506 if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1507 return err;
1508 if ((err = e100_load_ucode_wait(nic)))
1509 return err;
1510 if ((err = e100_exec_cb(nic, NULL, e100_configure)))
1511 return err;
1512 if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
1513 return err;
1514 if ((err = e100_exec_cmd(nic, cuc_dump_addr,
1515 nic->dma_addr + offsetof(struct mem, stats))))
1516 return err;
1517 if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
1518 return err;
1519
1520 e100_disable_irq(nic);
1521
1522 return 0;
1523}
1524
1525static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1526{
1527 struct net_device *netdev = nic->netdev;
1528 struct dev_mc_list *list = netdev->mc_list;
1529 u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
1530
1531 cb->command = cpu_to_le16(cb_multi);
1532 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1533 for (i = 0; list && i < count; i++, list = list->next)
1534 memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
1535 ETH_ALEN);
1536}
1537
1538static void e100_set_multicast_list(struct net_device *netdev)
1539{
1540 struct nic *nic = netdev_priv(netdev);
1541
1542 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
1543 netdev->mc_count, netdev->flags);
1544
1545 if (netdev->flags & IFF_PROMISC)
1546 nic->flags |= promiscuous;
1547 else
1548 nic->flags &= ~promiscuous;
1549
1550 if (netdev->flags & IFF_ALLMULTI ||
1551 netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
1552 nic->flags |= multicast_all;
1553 else
1554 nic->flags &= ~multicast_all;
1555
1556 e100_exec_cb(nic, NULL, e100_configure);
1557 e100_exec_cb(nic, NULL, e100_multi);
1558}
1559
/* Harvest the HW statistics dump area (filled by a previous
 * cuc_dump_reset) into netdev and driver counters, then start the next
 * dump so fresh numbers are ready for the following watchdog tick. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* The completion marker is written after the last counter that the
	 * MAC generation supports, so its address depends on nic->mac. */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */
	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* flow-control and TCO counters only exist on newer MACs */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	/* kick off the next dump; completion is checked on the next call */
	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
1618
1619static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1620{
1621
1622
1623
1624 if (duplex == DUPLEX_HALF) {
1625 u32 prev = nic->adaptive_ifs;
1626 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1627
1628 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1629 (nic->tx_frames > min_frames)) {
1630 if (nic->adaptive_ifs < 60)
1631 nic->adaptive_ifs += 5;
1632 } else if (nic->tx_frames < min_frames) {
1633 if (nic->adaptive_ifs >= 5)
1634 nic->adaptive_ifs -= 5;
1635 }
1636 if (nic->adaptive_ifs != prev)
1637 e100_exec_cb(nic, NULL, e100_configure);
1638 }
1639}
1640
/* Periodic (2 s) housekeeping timer: report link transitions, generate
 * a SW interrupt to nudge the RX path, refresh stats, retune adaptive
 * IFS, and toggle MAC-specific workarounds. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
			nic->netdev->name,
			cmd.speed == SPEED_100 ? "100" : "10",
			cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		printk(KERN_INFO "e100: %s NIC Link is Down\n",
			nic->netdev->name);
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
1690
/* CB setup callback for e100_exec_cb(): fill one TX command block with
 * a single-fragment TBD describing the whole (linear) skb. */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	/* NOTE(review): the pci_map_single() result is not checked with
	 * pci_dma_mapping_error() — a failed mapping would hand a bad bus
	 * address to the HW.  Confirm against platform DMA constraints. */
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));

	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
1707
1708static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1709 struct net_device *netdev)
1710{
1711 struct nic *nic = netdev_priv(netdev);
1712 int err;
1713
1714 if (nic->flags & ich_10h_workaround) {
1715
1716
1717
1718 if (e100_exec_cmd(nic, cuc_nop, 0))
1719 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
1720 udelay(1);
1721 }
1722
1723 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1724
1725 switch (err) {
1726 case -ENOSPC:
1727
1728 DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
1729 netif_stop_queue(netdev);
1730 break;
1731 case -ENOMEM:
1732
1733 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
1734 netif_stop_queue(netdev);
1735 return NETDEV_TX_BUSY;
1736 }
1737
1738 netdev->trans_start = jiffies;
1739 return NETDEV_TX_OK;
1740}
1741
/* Reap completed TX command blocks: walk the ring from cb_to_clean
 * while the HW has set cb_complete, unmap and free the skbs, and
 * recycle the CBs.  Returns nonzero when at least one skb was freed.
 * cb_lock orders this against the e100_exec_cb() producer side. */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
			(int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			cb->status);

		/* only TX CBs carry an skb; configure/multicast CBs don't */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}
1782
1783static void e100_clean_cbs(struct nic *nic)
1784{
1785 if (nic->cbs) {
1786 while (nic->cbs_avail != nic->params.cbs.count) {
1787 struct cb *cb = nic->cb_to_clean;
1788 if (cb->skb) {
1789 pci_unmap_single(nic->pdev,
1790 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1791 le16_to_cpu(cb->u.tcb.tbd.size),
1792 PCI_DMA_TODEVICE);
1793 dev_kfree_skb(cb->skb);
1794 }
1795 nic->cb_to_clean = nic->cb_to_clean->next;
1796 nic->cbs_avail++;
1797 }
1798 pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
1799 nic->cbs = NULL;
1800 nic->cbs_avail = 0;
1801 }
1802 nic->cuc_cmd = cuc_start;
1803 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1804 nic->cbs;
1805}
1806
1807static int e100_alloc_cbs(struct nic *nic)
1808{
1809 struct cb *cb;
1810 unsigned int i, count = nic->params.cbs.count;
1811
1812 nic->cuc_cmd = cuc_start;
1813 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1814 nic->cbs_avail = 0;
1815
1816 nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
1817 &nic->cbs_dma_addr);
1818 if (!nic->cbs)
1819 return -ENOMEM;
1820
1821 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
1822 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1823 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1824
1825 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1826 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1827 ((i+1) % count) * sizeof(struct cb));
1828 cb->skb = NULL;
1829 }
1830
1831 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1832 nic->cbs_avail = count;
1833
1834 return 0;
1835}
1836
1837static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1838{
1839 if (!nic->rxs) return;
1840 if (RU_SUSPENDED != nic->ru_running) return;
1841
1842
1843 if (!rx) rx = nic->rxs;
1844
1845
1846 if (rx->skb) {
1847 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1848 nic->ru_running = RU_RUNNING;
1849 }
1850}
1851
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
/* Post one receive buffer: allocate an skb, seed it with a blank RFD,
 * map it for bidirectional DMA, and chain it onto the previous RFD.
 * Returns 0 or -ENOMEM. */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to the end of the RFA by pointing the previous
	 * RFD's link at this one.  The previous RFD is safe to touch
	 * because the before-last buffer's el bit keeps the HW off it;
	 * the sync pushes the new link out before the HW can follow it. */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
1883
/* Hand one received frame to the stack.  Returns 0 on success, -EAGAIN
 * when the NAPI budget is exhausted, or -ENODATA when the RFD at this
 * position has not completed yet. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If this buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts. */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		/* hand the RFD back to the device untouched */
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size (low 14 bits), clamped to the buffer */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling
	 * interrupts. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

		if (ioread8(&nic->csr->scb.status) & rus_no_res)
			nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual payload (minus RFD header) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	/* ownership of the skb has passed to the stack (or it was freed) */
	rx->skb = NULL;

	return 0;
}
1966
/* NAPI RX path: indicate completed frames up to work_to_do, refill the
 * ring, move the el-bit stopping point forward, and restart the RU if
 * it went into the No-Resources state. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* hit quota (-EAGAIN) or nothing more completed (-ENODATA) */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break;
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before-last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that a new stopping point exists, release the old one:
		 * clear its el bit and restore its size.  The two separate
		 * syncs push the command change out before the size change
		 * so the HW never sees a usable buffer with el still set. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		/* ack the RNR before restarting the receive unit */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
2042
2043static void e100_rx_clean_list(struct nic *nic)
2044{
2045 struct rx *rx;
2046 unsigned int i, count = nic->params.rfds.count;
2047
2048 nic->ru_running = RU_UNINITIALIZED;
2049
2050 if (nic->rxs) {
2051 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2052 if (rx->skb) {
2053 pci_unmap_single(nic->pdev, rx->dma_addr,
2054 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2055 dev_kfree_skb(rx->skb);
2056 }
2057 }
2058 kfree(nic->rxs);
2059 nic->rxs = NULL;
2060 }
2061
2062 nic->rx_to_use = nic->rx_to_clean = NULL;
2063}
2064
/* Build the circular RX ring: allocate the rx descriptor array, post an
 * skb/RFD on each slot, then mark the before-last RFD as the stopping
 * point so the HW never touches the buffer whose link pointer gets
 * rewritten during refill.  Leaves the RU SUSPENDED, ready for
 * e100_start_receiver().  Returns 0 or -ENOMEM. */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	/* link the descriptors into a doubly-linked ring as we fill them */
	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer without
	 * worrying about hardware touching it.
	 * We set the size to 0 to prevent hardware from touching this
	 * buffer.  When the hardware hits the before-last buffer with
	 * el-bit and size of 0, it will RNR interrupt, and the RU will go
	 * into the No Resources state without writing to this buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
2104
2105static irqreturn_t e100_intr(int irq, void *dev_id)
2106{
2107 struct net_device *netdev = dev_id;
2108 struct nic *nic = netdev_priv(netdev);
2109 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
2110
2111 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
2112
2113 if (stat_ack == stat_ack_not_ours ||
2114 stat_ack == stat_ack_not_present)
2115 return IRQ_NONE;
2116
2117
2118 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
2119
2120
2121 if (stat_ack & stat_ack_rnr)
2122 nic->ru_running = RU_SUSPENDED;
2123
2124 if (likely(napi_schedule_prep(&nic->napi))) {
2125 e100_disable_irq(nic);
2126 __napi_schedule(&nic->napi);
2127 }
2128
2129 return IRQ_HANDLED;
2130}
2131
2132static int e100_poll(struct napi_struct *napi, int budget)
2133{
2134 struct nic *nic = container_of(napi, struct nic, napi);
2135 unsigned int work_done = 0;
2136
2137 e100_rx_clean(nic, &work_done, budget);
2138 e100_tx_clean(nic);
2139
2140
2141 if (work_done < budget) {
2142 napi_complete(napi);
2143 e100_enable_irq(nic);
2144 }
2145
2146 return work_done;
2147}
2148
2149#ifdef CONFIG_NET_POLL_CONTROLLER
2150static void e100_netpoll(struct net_device *netdev)
2151{
2152 struct nic *nic = netdev_priv(netdev);
2153
2154 e100_disable_irq(nic);
2155 e100_intr(nic->pdev->irq, netdev);
2156 e100_tx_clean(nic);
2157 e100_enable_irq(nic);
2158}
2159#endif
2160
2161static int e100_set_mac_address(struct net_device *netdev, void *p)
2162{
2163 struct nic *nic = netdev_priv(netdev);
2164 struct sockaddr *addr = p;
2165
2166 if (!is_valid_ether_addr(addr->sa_data))
2167 return -EADDRNOTAVAIL;
2168
2169 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2170 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2171
2172 return 0;
2173}
2174
2175static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2176{
2177 if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2178 return -EINVAL;
2179 netdev->mtu = new_mtu;
2180 return 0;
2181}
2182
2183static int e100_asf(struct nic *nic)
2184{
2185
2186 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2187 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2188 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2189 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
2190}
2191
/* Bring the interface fully up: RX ring, CB ring, HW init, receiver,
 * watchdog, IRQ, queue, NAPI, interrupts — unwinding in reverse order
 * through the gotos on any failure. */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	/* fire the first watchdog tick immediately */
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints + schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
2223
/* Tear the interface down, mirroring e100_up() in reverse: stop NAPI
 * and the queue first so no new work arrives, quiesce the HW, then
 * release IRQ, timer, and both rings. */
static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
2236
2237static void e100_tx_timeout(struct net_device *netdev)
2238{
2239 struct nic *nic = netdev_priv(netdev);
2240
2241
2242
2243 schedule_work(&nic->tx_timeout_task);
2244}
2245
2246static void e100_tx_timeout_task(struct work_struct *work)
2247{
2248 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2249 struct net_device *netdev = nic->netdev;
2250
2251 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
2252 ioread8(&nic->csr->scb.status));
2253 e100_down(netdev_priv(netdev));
2254 e100_up(netdev_priv(netdev));
2255}
2256
/* Internal loopback self-test: bring up minimal rings and the HW in
 * the requested loopback mode, transmit one known frame, and verify
 * the same bytes arrive in the first RX buffer.  Returns 0 on success,
 * -EAGAIN on data mismatch, or a setup errno. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	/* all-0xFF payload as the known pattern */
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* give the frame time to loop back */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* compare payload past the RFD header against what was sent */
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
2312
2313#define MII_LED_CONTROL 0x1B
2314#define E100_82552_LED_OVERRIDE 0x19
2315#define E100_82552_LED_ON 0x000F
2316#define E100_82552_LED_OFF 0x000A
2317static void e100_blink_led(unsigned long data)
2318{
2319 struct nic *nic = (struct nic *)data;
2320 enum led_state {
2321 led_on = 0x01,
2322 led_off = 0x04,
2323 led_on_559 = 0x05,
2324 led_on_557 = 0x07,
2325 };
2326 u16 led_reg = MII_LED_CONTROL;
2327
2328 if (nic->phy == phy_82552_v) {
2329 led_reg = E100_82552_LED_OVERRIDE;
2330
2331 nic->leds = (nic->leds == E100_82552_LED_ON) ?
2332 E100_82552_LED_OFF : E100_82552_LED_ON;
2333 } else {
2334 nic->leds = (nic->leds & led_on) ? led_off :
2335 (nic->mac < mac_82559_D101M) ? led_on_557 :
2336 led_on_559;
2337 }
2338 mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
2339 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2340}
2341
2342static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2343{
2344 struct nic *nic = netdev_priv(netdev);
2345 return mii_ethtool_gset(&nic->mii, cmd);
2346}
2347
2348static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2349{
2350 struct nic *nic = netdev_priv(netdev);
2351 int err;
2352
2353 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2354 err = mii_ethtool_sset(&nic->mii, cmd);
2355 e100_exec_cb(nic, NULL, e100_configure);
2356
2357 return err;
2358}
2359
2360static void e100_get_drvinfo(struct net_device *netdev,
2361 struct ethtool_drvinfo *info)
2362{
2363 struct nic *nic = netdev_priv(netdev);
2364 strcpy(info->driver, DRV_NAME);
2365 strcpy(info->version, DRV_VERSION);
2366 strcpy(info->fw_version, "N/A");
2367 strcpy(info->bus_info, pci_name(nic->pdev));
2368}
2369
2370#define E100_PHY_REGS 0x1C
2371static int e100_get_regs_len(struct net_device *netdev)
2372{
2373 struct nic *nic = netdev_priv(netdev);
2374 return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
2375}
2376
/* ethtool get_regs: dump one u32 of SCB cmd/status, PHY registers
 * E100_PHY_REGS down to 0 (landing in buff[1..1+E100_PHY_REGS]), then
 * the HW dump buffer starting at buff[2 + E100_PHY_REGS]. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	/* word 0: SCB cmd_hi | cmd_lo | status, packed into one u32 */
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	/* NOTE(review): fixed 10 ms wait for the dump command to land;
	 * there is no completion handshake — confirm this always suffices */
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
2397
2398static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2399{
2400 struct nic *nic = netdev_priv(netdev);
2401 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2402 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2403}
2404
2405static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2406{
2407 struct nic *nic = netdev_priv(netdev);
2408
2409 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2410 !device_can_wakeup(&nic->pdev->dev))
2411 return -EOPNOTSUPP;
2412
2413 if (wol->wolopts)
2414 nic->flags |= wol_magic;
2415 else
2416 nic->flags &= ~wol_magic;
2417
2418 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2419
2420 e100_exec_cb(nic, NULL, e100_configure);
2421
2422 return 0;
2423}
2424
2425static u32 e100_get_msglevel(struct net_device *netdev)
2426{
2427 struct nic *nic = netdev_priv(netdev);
2428 return nic->msg_enable;
2429}
2430
2431static void e100_set_msglevel(struct net_device *netdev, u32 value)
2432{
2433 struct nic *nic = netdev_priv(netdev);
2434 nic->msg_enable = value;
2435}
2436
2437static int e100_nway_reset(struct net_device *netdev)
2438{
2439 struct nic *nic = netdev_priv(netdev);
2440 return mii_nway_restart(&nic->mii);
2441}
2442
2443static u32 e100_get_link(struct net_device *netdev)
2444{
2445 struct nic *nic = netdev_priv(netdev);
2446 return mii_link_ok(&nic->mii);
2447}
2448
2449static int e100_get_eeprom_len(struct net_device *netdev)
2450{
2451 struct nic *nic = netdev_priv(netdev);
2452 return nic->eeprom_wc << 1;
2453}
2454
2455#define E100_EEPROM_MAGIC 0x1234
2456static int e100_get_eeprom(struct net_device *netdev,
2457 struct ethtool_eeprom *eeprom, u8 *bytes)
2458{
2459 struct nic *nic = netdev_priv(netdev);
2460
2461 eeprom->magic = E100_EEPROM_MAGIC;
2462 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2463
2464 return 0;
2465}
2466
/* ethtool set_eeprom: patch the cached EEPROM image, then flush the
 * touched region back to the device. */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	/* e100_eeprom_save() takes a word offset/count; the +1 widens the
	 * count so a trailing partial word is still written back */
	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
2480
2481static void e100_get_ringparam(struct net_device *netdev,
2482 struct ethtool_ringparam *ring)
2483{
2484 struct nic *nic = netdev_priv(netdev);
2485 struct param_range *rfds = &nic->params.rfds;
2486 struct param_range *cbs = &nic->params.cbs;
2487
2488 ring->rx_max_pending = rfds->max;
2489 ring->tx_max_pending = cbs->max;
2490 ring->rx_mini_max_pending = 0;
2491 ring->rx_jumbo_max_pending = 0;
2492 ring->rx_pending = rfds->count;
2493 ring->tx_pending = cbs->count;
2494 ring->rx_mini_pending = 0;
2495 ring->rx_jumbo_pending = 0;
2496}
2497
2498static int e100_set_ringparam(struct net_device *netdev,
2499 struct ethtool_ringparam *ring)
2500{
2501 struct nic *nic = netdev_priv(netdev);
2502 struct param_range *rfds = &nic->params.rfds;
2503 struct param_range *cbs = &nic->params.cbs;
2504
2505 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2506 return -EINVAL;
2507
2508 if (netif_running(netdev))
2509 e100_down(nic);
2510 rfds->count = max(ring->rx_pending, rfds->min);
2511 rfds->count = min(rfds->count, rfds->max);
2512 cbs->count = max(ring->tx_pending, cbs->min);
2513 cbs->count = min(cbs->count, cbs->max);
2514 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
2515 rfds->count, cbs->count);
2516 if (netif_running(netdev))
2517 e100_up(nic);
2518
2519 return 0;
2520}
2521
/* Labels for e100_diag_test() results; the order must match the data[]
 * indices filled in there. */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};
#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
2530
/* ethtool self-test: data[0]=link, data[1]=EEPROM checksum; offline
 * adds data[2]=self-test, data[3]=MAC loopback, data[4]=PHY loopback,
 * bouncing the interface around the destructive tests and restoring
 * the pre-test link settings afterwards. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	/* any nonzero result marks the overall run as failed */
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* NOTE(review): unconditional 4 s pause before returning — its
	 * purpose is not evident from this file; presumably lets the link
	 * renegotiate after loopback.  Confirm before changing. */
	msleep_interruptible(4 * 1000);
}
2563
2564static int e100_phys_id(struct net_device *netdev, u32 data)
2565{
2566 struct nic *nic = netdev_priv(netdev);
2567 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
2568 MII_LED_CONTROL;
2569
2570 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2571 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2572 mod_timer(&nic->blink_timer, jiffies);
2573 msleep_interruptible(data * 1000);
2574 del_timer_sync(&nic->blink_timer);
2575 mdio_write(netdev, nic->mii.phy_id, led_reg, 0);
2576
2577 return 0;
2578}
2579
/* Stat labels for ethtool -S.  The first E100_NET_STATS_LEN entries map
 * 1:1 onto the leading unsigned-long fields of struct net_device_stats
 * (dumped as an array in e100_get_ethtool_stats()); the remainder are
 * driver-private counters appended in the same order. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
/* number of entries above sourced from struct net_device_stats */
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
2594
2595static int e100_get_sset_count(struct net_device *netdev, int sset)
2596{
2597 switch (sset) {
2598 case ETH_SS_TEST:
2599 return E100_TEST_LEN;
2600 case ETH_SS_STATS:
2601 return E100_STATS_LEN;
2602 default:
2603 return -EOPNOTSUPP;
2604 }
2605}
2606
/* ethtool .get_ethtool_stats handler: copy out E100_STATS_LEN counters
 * in the order named by e100_gstrings_stats[]. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	/* Copy the generic counters straight out of netdev->stats.
	 * NOTE(review): this treats struct net_device_stats as a flat
	 * array of unsigned long — it assumes the first
	 * E100_NET_STATS_LEN fields are unsigned long, in declaration
	 * order matching the string table; confirm against the struct. */
	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	/* driver-private counters, same order as the string table */
	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}
2625
2626static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2627{
2628 switch (stringset) {
2629 case ETH_SS_TEST:
2630 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2631 break;
2632 case ETH_SS_STATS:
2633 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2634 break;
2635 }
2636}
2637
/* ethtool operations table wired into the netdev at probe time via
 * SET_ETHTOOL_OPS(); handlers defined above and earlier in this file. */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings = e100_get_settings,
	.set_settings = e100_set_settings,
	.get_drvinfo = e100_get_drvinfo,
	.get_regs_len = e100_get_regs_len,
	.get_regs = e100_get_regs,
	.get_wol = e100_get_wol,
	.set_wol = e100_set_wol,
	.get_msglevel = e100_get_msglevel,
	.set_msglevel = e100_set_msglevel,
	.nway_reset = e100_nway_reset,
	.get_link = e100_get_link,
	.get_eeprom_len = e100_get_eeprom_len,
	.get_eeprom = e100_get_eeprom,
	.set_eeprom = e100_set_eeprom,
	.get_ringparam = e100_get_ringparam,
	.set_ringparam = e100_set_ringparam,
	.self_test = e100_diag_test,
	.get_strings = e100_get_strings,
	.phys_id = e100_phys_id,
	.get_ethtool_stats = e100_get_ethtool_stats,
	.get_sset_count = e100_get_sset_count,
};
2661
2662static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2663{
2664 struct nic *nic = netdev_priv(netdev);
2665
2666 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2667}
2668
2669static int e100_alloc(struct nic *nic)
2670{
2671 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2672 &nic->dma_addr);
2673 return nic->mem ? 0 : -ENOMEM;
2674}
2675
2676static void e100_free(struct nic *nic)
2677{
2678 if (nic->mem) {
2679 pci_free_consistent(nic->pdev, sizeof(struct mem),
2680 nic->mem, nic->dma_addr);
2681 nic->mem = NULL;
2682 }
2683}
2684
2685static int e100_open(struct net_device *netdev)
2686{
2687 struct nic *nic = netdev_priv(netdev);
2688 int err = 0;
2689
2690 netif_carrier_off(netdev);
2691 if ((err = e100_up(nic)))
2692 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2693 return err;
2694}
2695
/* ndo_stop handler: tear the interface down; always succeeds. */
static int e100_close(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_down(nic);
	return 0;
}
2701
/* net_device operations table installed on the netdev at probe time. */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open = e100_open,
	.ndo_stop = e100_close,
	.ndo_start_xmit = e100_xmit_frame,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = e100_set_multicast_list,
	.ndo_set_mac_address = e100_set_mac_address,
	.ndo_change_mtu = e100_change_mtu,
	.ndo_do_ioctl = e100_do_ioctl,
	.ndo_tx_timeout = e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e100_netpoll,
#endif
};
2716
2717static int __devinit e100_probe(struct pci_dev *pdev,
2718 const struct pci_device_id *ent)
2719{
2720 struct net_device *netdev;
2721 struct nic *nic;
2722 int err;
2723
2724 if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2725 if (((1 << debug) - 1) & NETIF_MSG_PROBE)
2726 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2727 return -ENOMEM;
2728 }
2729
2730 netdev->netdev_ops = &e100_netdev_ops;
2731 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2732 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2733 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2734
2735 nic = netdev_priv(netdev);
2736 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
2737 nic->netdev = netdev;
2738 nic->pdev = pdev;
2739 nic->msg_enable = (1 << debug) - 1;
2740 nic->mdio_ctrl = mdio_ctrl_hw;
2741 pci_set_drvdata(pdev, netdev);
2742
2743 if ((err = pci_enable_device(pdev))) {
2744 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2745 goto err_out_free_dev;
2746 }
2747
2748 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2749 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2750 "base address, aborting.\n");
2751 err = -ENODEV;
2752 goto err_out_disable_pdev;
2753 }
2754
2755 if ((err = pci_request_regions(pdev, DRV_NAME))) {
2756 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2757 goto err_out_disable_pdev;
2758 }
2759
2760 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
2761 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2762 goto err_out_free_res;
2763 }
2764
2765 SET_NETDEV_DEV(netdev, &pdev->dev);
2766
2767 if (use_io)
2768 DPRINTK(PROBE, INFO, "using i/o access mode\n");
2769
2770 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
2771 if (!nic->csr) {
2772 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2773 err = -ENOMEM;
2774 goto err_out_free_res;
2775 }
2776
2777 if (ent->driver_data)
2778 nic->flags |= ich;
2779 else
2780 nic->flags &= ~ich;
2781
2782 e100_get_defaults(nic);
2783
2784
2785 spin_lock_init(&nic->cb_lock);
2786 spin_lock_init(&nic->cmd_lock);
2787 spin_lock_init(&nic->mdio_lock);
2788
2789
2790
2791
2792 e100_hw_reset(nic);
2793
2794 pci_set_master(pdev);
2795
2796 init_timer(&nic->watchdog);
2797 nic->watchdog.function = e100_watchdog;
2798 nic->watchdog.data = (unsigned long)nic;
2799 init_timer(&nic->blink_timer);
2800 nic->blink_timer.function = e100_blink_led;
2801 nic->blink_timer.data = (unsigned long)nic;
2802
2803 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2804
2805 if ((err = e100_alloc(nic))) {
2806 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2807 goto err_out_iounmap;
2808 }
2809
2810 if ((err = e100_eeprom_load(nic)))
2811 goto err_out_free;
2812
2813 e100_phy_init(nic);
2814
2815 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2816 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2817 if (!is_valid_ether_addr(netdev->perm_addr)) {
2818 if (!eeprom_bad_csum_allow) {
2819 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2820 "EEPROM, aborting.\n");
2821 err = -EAGAIN;
2822 goto err_out_free;
2823 } else {
2824 DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
2825 "you MUST configure one.\n");
2826 }
2827 }
2828
2829
2830 if ((nic->mac >= mac_82558_D101_A4) &&
2831 (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
2832 nic->flags |= wol_magic;
2833 device_set_wakeup_enable(&pdev->dev, true);
2834 }
2835
2836
2837 pci_pme_active(pdev, false);
2838
2839 strcpy(netdev->name, "eth%d");
2840 if ((err = register_netdev(netdev))) {
2841 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2842 goto err_out_free;
2843 }
2844 nic->cbs_pool = pci_pool_create(netdev->name,
2845 nic->pdev,
2846 nic->params.cbs.count * sizeof(struct cb),
2847 sizeof(u32),
2848 0);
2849 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
2850 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2851 pdev->irq, netdev->dev_addr);
2852
2853 return 0;
2854
2855err_out_free:
2856 e100_free(nic);
2857err_out_iounmap:
2858 pci_iounmap(pdev, nic->csr);
2859err_out_free_res:
2860 pci_release_regions(pdev);
2861err_out_disable_pdev:
2862 pci_disable_device(pdev);
2863err_out_free_dev:
2864 pci_set_drvdata(pdev, NULL);
2865 free_netdev(netdev);
2866 return err;
2867}
2868
/* PCI remove: unregister the netdev and release all probe-time
 * resources in reverse order of acquisition. */
static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		/* stop traffic/ioctls before freeing anything */
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
2885
/* 82552 SmartSpeed register and its autoneg control bits, used below to
 * force a fresh link negotiation before entering a wake-capable state. */
#define E100_82552_SMARTSPEED 0x14
#define E100_82552_REV_ANEG 0x0200
#define E100_82552_ANEG_NOW 0x0400
/* Common shutdown/suspend path: quiesce the device, save PCI state,
 * and report via *enable_wake whether the caller should arm wake-up
 * (WoL magic packet configured, or ASF/management firmware active). */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* kick the 82552 PHY into renegotiating the link so it is
		 * usable in the low-power state */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
				E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
				E100_82552_SMARTSPEED, smartspeed |
				E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}
2917
2918static int __e100_power_off(struct pci_dev *pdev, bool wake)
2919{
2920 if (wake)
2921 return pci_prepare_to_sleep(pdev);
2922
2923 pci_wake_from_d3(pdev, false);
2924 pci_set_power_state(pdev, PCI_D3hot);
2925
2926 return 0;
2927}
2928
#ifdef CONFIG_PM
/* Legacy PM suspend hook: quiesce via the shared shutdown path, then
 * power off with wake-up armed as __e100_shutdown() decided. */
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	return __e100_power_off(pdev, wake);
}

/* Legacy PM resume hook: restore PCI state, undo the suspend-time PHY
 * tweak, and bring the interface back up if it was running. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* undo the reverse-autoneg bit set on the 82552 at suspend */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
			E100_82552_SMARTSPEED,
			smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif
2964
/* PCI shutdown hook (reboot/halt): quiesce the device; only transition
 * to a low-power state when the system is actually powering off. */
static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}
2972
2973
2974
2975
2976
2977
2978
/* AER callback: a PCI bus error was detected on this device.  Detach
 * and quiesce, then ask the core for a slot reset (or disconnect if
 * the failure is permanent). */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
2996
2997
2998
2999
3000
3001
3002
/* AER callback: the slot has been reset; re-enable the device and
 * reinitialize the hardware so e100_io_resume() can restart traffic. */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* only reinitialize hardware once, from function 0 */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
3022
3023
3024
3025
3026
3027
3028
3029
/* AER callback: recovery complete — reattach the netdev and, if it was
 * running, reopen it and restart the watchdog. */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}
3044
/* PCI AER (Advanced Error Reporting) recovery callbacks. */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
3050
/* PCI driver registration block; e100_id_table is defined earlier in
 * the file. */
static struct pci_driver e100_driver = {
	.name = DRV_NAME,
	.id_table = e100_id_table,
	.probe = e100_probe,
	.remove = __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* legacy power-management hooks */
	.suspend = e100_suspend,
	.resume = e100_resume,
#endif
	.shutdown = e100_shutdown,
	.err_handler = &e100_err_handler,
};
3064
/* Module init: announce the driver (when driver messages are enabled
 * via the debug level) and register with the PCI core. */
static int __init e100_init_module(void)
{
	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
		printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
		printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}
3073
/* Module exit: unregister from the PCI core (triggers e100_remove for
 * each bound device). */
static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}
3078
3079module_init(e100_init_module);
3080module_exit(e100_cleanup_module);
3081