1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
151
152#include <linux/hardirq.h>
153#include <linux/interrupt.h>
154#include <linux/module.h>
155#include <linux/moduleparam.h>
156#include <linux/kernel.h>
157#include <linux/types.h>
158#include <linux/sched.h>
159#include <linux/slab.h>
160#include <linux/delay.h>
161#include <linux/init.h>
162#include <linux/pci.h>
163#include <linux/dma-mapping.h>
164#include <linux/dmapool.h>
165#include <linux/netdevice.h>
166#include <linux/etherdevice.h>
167#include <linux/mii.h>
168#include <linux/if_vlan.h>
169#include <linux/skbuff.h>
170#include <linux/ethtool.h>
171#include <linux/string.h>
172#include <linux/firmware.h>
173#include <linux/rtnetlink.h>
174#include <asm/unaligned.h>
175
176
177#define DRV_NAME "e100"
178#define DRV_EXT "-NAPI"
179#define DRV_VERSION "3.5.24-k2"DRV_EXT
180#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
181#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
182
183#define E100_WATCHDOG_PERIOD (2 * HZ)
184#define E100_NAPI_WEIGHT 16
185
186#define FIRMWARE_D101M "e100/d101m_ucode.bin"
187#define FIRMWARE_D101S "e100/d101s_ucode.bin"
188#define FIRMWARE_D102E "e100/d102e_ucode.bin"
189
190MODULE_DESCRIPTION(DRV_DESCRIPTION);
191MODULE_AUTHOR(DRV_COPYRIGHT);
192MODULE_LICENSE("GPL");
193MODULE_VERSION(DRV_VERSION);
194MODULE_FIRMWARE(FIRMWARE_D101M);
195MODULE_FIRMWARE(FIRMWARE_D101S);
196MODULE_FIRMWARE(FIRMWARE_D102E);
197
198static int debug = 3;
199static int eeprom_bad_csum_allow = 0;
200static int use_io = 0;
201module_param(debug, int, 0);
202module_param(eeprom_bad_csum_allow, int, 0);
203module_param(use_io, int, 0);
204MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
205MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
206MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
207
208#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
209 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
210 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
211static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
212 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
213 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
214 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
215 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
216 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
217 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
218 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
219 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
220 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
221 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
222 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
223 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
224 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
225 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
226 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
227 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
228 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
229 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
230 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
231 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
232 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
233 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
234 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
235 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
236 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
237 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
238 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
239 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
240 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
241 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
242 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
243 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
244 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
245 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
246 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
247 INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
248 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
249 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
250 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
251 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
252 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
253 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
254 { 0, }
255};
256MODULE_DEVICE_TABLE(pci, e100_id_table);
257
/* MAC (controller) revisions; taken from the PCI revision ID, except
 * ICH-bridged parts which are forced to D101M (see e100_get_defaults). */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};
273
/* Known PHY IDs, as assembled from MII_PHYSID2:MII_PHYSID1
 * (see e100_phy_init). */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};
286
287
/* Memory-mapped Control/Status Register layout; accessed only through
 * ioread/iowrite on nic->csr. */
struct csr {
	struct {
		u8 status;	/* SCB status (RU state etc.) */
		u8 stat_ack;	/* interrupt causes; write 1s to ack */
		u8 cmd_lo;	/* CU/RU command opcode */
		u8 cmd_hi;	/* interrupt mask control */
		u32 gen_ptr;	/* general pointer (bus address operand) */
	} scb;
	u32 port;		/* PORT register: resets / self-test */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;	/* bit-banged EEPROM control lines */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MII management interface */
	u32 rx_dma_count;
};
303
/* SCB status byte: Receive Unit state field */
enum scb_status {
	rus_no_res = 0x08,	/* RU suspended: no resources */
	rus_ready = 0x10,	/* RU ready to receive */
	rus_mask = 0x3C,	/* RU-state bits within the status byte */
};

/* Driver-side bookkeeping of the Receive Unit state */
enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};

/* SCB stat/ack byte: interrupt causes; write the bits back to ack */
enum scb_stat_ack {
	stat_ack_not_ours = 0x00,	/* interrupt not from this device */
	stat_ack_sw_gen = 0x04,		/* software-generated interrupt */
	stat_ack_rnr = 0x10,		/* RU is not ready */
	stat_ack_cu_idle = 0x20,	/* CU went idle */
	stat_ack_frame_rx = 0x40,	/* frame received */
	stat_ack_cu_cmd_done = 0x80,	/* CU command complete */
	stat_ack_not_present = 0xFF,	/* all-ones: device not responding */
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

/* SCB command byte, high half: interrupt mask control */
enum scb_cmd_hi {
	irq_mask_none = 0x00,	/* unmask interrupts */
	irq_mask_all = 0x01,	/* mask all interrupts */
	irq_sw_gen = 0x02,	/* generate a software interrupt */
};

/* SCB command byte, low half: Command/Receive Unit opcodes */
enum scb_cmd_lo {
	cuc_nop = 0x00,
	ruc_start = 0x01,	/* start the Receive Unit */
	ruc_load_base = 0x06,	/* load RU base address */
	cuc_start = 0x10,	/* start the Command Unit */
	cuc_resume = 0x20,	/* resume a suspended CU */
	cuc_dump_addr = 0x40,	/* load statistics dump address */
	cuc_dump_stats = 0x50,	/* dump statistics counters */
	cuc_load_base = 0x60,	/* load CU base address */
	cuc_dump_reset = 0x70,	/* dump and reset counters */
};
345
/* Completion codes written into stats.complete after a stats dump */
enum cuc_dump {
	cuc_dump_complete = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

/* PORT register commands (see e100_hw_reset / e100_self_test) */
enum port {
	software_reset = 0x0000,
	selftest = 0x0001,
	selective_reset = 0x0002,
};

/* Bit-banged serial EEPROM control lines */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* serial clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (host -> EEPROM) */
	eedo = 0x08,	/* data out (EEPROM -> host) */
};

/* MDI control register: operation and status bits */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read = 0x08000000,
	mdi_ready = 0x10000000,
};

/* Serial EEPROM opcodes */
enum eeprom_op {
	op_write = 0x05,
	op_read = 0x06,
	op_ewds = 0x10,	/* erase/write disable */
	op_ewen = 0x13,	/* erase/write enable */
};
376
/* Word offsets of interesting fields within the EEPROM image */
enum eeprom_offsets {
	eeprom_cnfg_mdix = 0x03,
	eeprom_phy_iface = 0x06,
	eeprom_id = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

/* PHY interface codes stored in the EEPROM; read from the high byte of
 * word eeprom_phy_iface (see e100_phy_check_without_mii). */
enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,	/* Wake-on-LAN capable */
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

/* Command Block status bits written back by the chip */
enum cb_status {
	cb_complete = 0x8000,
	cb_ok = 0x2000,
};
414
415
416
417
418
/* Command Block opcodes (low bits) and control flags (high bits) */
enum cb_command {
	cb_nop = 0x0000,
	cb_iaaddr = 0x0001,	/* load individual (MAC) address */
	cb_config = 0x0002,	/* load configure block */
	cb_multi = 0x0003,	/* load multicast list */
	cb_tx = 0x0004,		/* transmit */
	cb_ucode = 0x0005,	/* load microcode */
	cb_dump = 0x0006,	/* dump to buffer */
	cb_tx_sf = 0x0008,	/* TX flexible mode */
	cb_tx_nc = 0x0010,	/* 0: controller does CRC (normal), 1: CRC from memory */
	cb_cid = 0x1f00,	/* CNA interrupt delay field */
	cb_i = 0x2000,		/* interrupt on completion */
	cb_s = 0x4000,		/* suspend on completion */
	cb_el = 0x8000,		/* end of list */
};
434
/* Receive Frame Descriptor, as laid out in little-endian device memory */
struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;		/* bus address of the next RFD */
	__le32 rbd;
	__le16 actual_size;	/* byte count actually received */
	__le16 size;		/* buffer size */
};

/* Driver-side receive bookkeeping: one per RFD, kept in a linked list */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
449
/* Bit-field layout helper: on big-endian hosts the fields within each
 * byte must be declared in the opposite order to preserve the device's
 * little-endian wire layout. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
/* Configure-command parameter block; one field group per config byte
 * (the padN names mark their byte index).  byte_count tells the chip
 * how many bytes to read: 0x16 normally, 0x20 on newer MACs
 * (see e100_configure). */
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];		/* remainder of the 0x20-byte D102 block */
};
493
#define E100_MAX_MULTICAST_ADDRS 64
/* Multicast-setup command parameter block */
struct multi {
	__le16 count;	/* byte count of the address list that follows */
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Command Block: hardware-visible header and per-command parameter
 * union, followed by driver-only list/bookkeeping fields. */
#define UCODE_SIZE 134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;			/* bus address of the next CB */
	union {
		u8 iaaddr[ETH_ALEN];	/* cb_iaaddr */
		__le32 ucode[UCODE_SIZE];	/* cb_ucode */
		struct config config;	/* cb_config */
		struct multi multi;	/* cb_multi */
		struct {		/* cb_tx: transmit control block */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;	/* cb_dump */
	} u;
	/* Fields below are not seen by the hardware */
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};
528
/* Loopback modes programmed into config->loopback */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

/* Statistics dump area filled in by the chip (cuc_dump_stats);
 * 'complete' receives a cuc_dump completion code when done. */
struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

/* DMA-coherent scratch memory shared with the device */
struct mem {
	struct {
		u32 signature;	/* written by a passing self-test */
		u32 result;	/* non-zero indicates self-test failure */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

/* min/max/current bounds for a tunable ring size */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;	/* receive frame descriptors */
	struct param_range cbs;		/* command blocks */
};
564
/* Per-adapter state.  Hot fields are grouped and cacheline-aligned. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable ____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	/* PHY-specific MDIO access backend (real hardware or emulation) */
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs ____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;		/* template for freshly allocated RFDs */
	enum ru_state ru_running;

	spinlock_t cb_lock ____cacheline_aligned;	/* protects CB list state */
	spinlock_t cmd_lock;		/* serializes SCB command accesses */
	struct csr __iomem *csr;	/* mapped device registers */
	enum scb_cmd_lo cuc_cmd;	/* cuc_start or cuc_resume */
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values */

	enum {
		ich = (1 << 0),			/* ICH-bridged device */
		promiscuous = (1 << 1),
		multicast_all = (1 << 2),
		wol_magic = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags ____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* DMA-coherent scratch area */
	dma_addr_t dma_addr;		/* bus address of @mem */

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;			/* EEPROM size in 16-bit words */
	__le16 eeprom[256];		/* cached EEPROM contents */
	spinlock_t mdio_lock;		/* serializes MDI control accesses */
	const struct firmware *fw;	/* cached ucode, reused on re-init */
};
633
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous (posted) PCI writes by issuing a harmless read
	 * from the device's status register. */
	(void)ioread8(&nic->csr->scb.status);
}
640
/* Unmask device interrupts (cmd_hi is the SCB interrupt-mask byte). */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);	/* push the write out before unlocking */
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

/* Mask all device interrupts. */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
660
/* Reset the hardware: selective reset first, then a full software
 * reset, then re-mask interrupts (the reset unmasks them). */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
675
/* Run the chip's built-in self-test; also proves the device can DMA
 * into host memory.  Returns 0 on pass, -ETIMEDOUT on failure. */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* The chip DMAs a signature and result into this area;
	 * pre-set them so we can detect "nothing happened". */
	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test, so mask them again */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	/* signature still zero means the chip never wrote back */
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
708
709static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
710{
711 u32 cmd_addr_data[3];
712 u8 ctrl;
713 int i, j;
714
715
716 cmd_addr_data[0] = op_ewen << (addr_len - 2);
717 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
718 le16_to_cpu(data);
719 cmd_addr_data[2] = op_ewds << (addr_len - 2);
720
721
722 for (j = 0; j < 3; j++) {
723
724
725 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
726 e100_write_flush(nic); udelay(4);
727
728 for (i = 31; i >= 0; i--) {
729 ctrl = (cmd_addr_data[j] & (1 << i)) ?
730 eecs | eedi : eecs;
731 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
732 e100_write_flush(nic); udelay(4);
733
734 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
735 e100_write_flush(nic); udelay(4);
736 }
737
738 msleep(10);
739
740
741 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
742 e100_write_flush(nic); udelay(4);
743 }
744};
745
746
747static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
748{
749 u32 cmd_addr_data;
750 u16 data = 0;
751 u8 ctrl;
752 int i;
753
754 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
755
756
757 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
758 e100_write_flush(nic); udelay(4);
759
760
761 for (i = 31; i >= 0; i--) {
762 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
763 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
764 e100_write_flush(nic); udelay(4);
765
766 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
767 e100_write_flush(nic); udelay(4);
768
769
770
771 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
772 if (!(ctrl & eedo) && i > 16) {
773 *addr_len -= (i - 16);
774 i = 17;
775 }
776
777 data = (data << 1) | (ctrl & eedo ? 1 : 0);
778 }
779
780
781 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
782 e100_write_flush(nic); udelay(4);
783
784 return cpu_to_le16(data);
785};
786
787
788static int e100_eeprom_load(struct nic *nic)
789{
790 u16 addr, addr_len = 8, checksum = 0;
791
792
793 e100_eeprom_read(nic, &addr_len, 0);
794 nic->eeprom_wc = 1 << addr_len;
795
796 for (addr = 0; addr < nic->eeprom_wc; addr++) {
797 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
798 if (addr < nic->eeprom_wc - 1)
799 checksum += le16_to_cpu(nic->eeprom[addr]);
800 }
801
802
803
804 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
805 netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
806 if (!eeprom_bad_csum_allow)
807 return -EAGAIN;
808 }
809
810 return 0;
811}
812
813
814static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
815{
816 u16 addr, addr_len = 8, checksum = 0;
817
818
819 e100_eeprom_read(nic, &addr_len, 0);
820 nic->eeprom_wc = 1 << addr_len;
821
822 if (start + count >= nic->eeprom_wc)
823 return -EINVAL;
824
825 for (addr = start; addr < start + count; addr++)
826 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
827
828
829
830 for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
831 checksum += le16_to_cpu(nic->eeprom[addr]);
832 nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
833 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
834 nic->eeprom[nic->eeprom_wc - 1]);
835
836 return 0;
837}
838
#define E100_WAIT_SCB_TIMEOUT 20000	/* poll iterations before giving up */
#define E100_WAIT_SCB_FAST 20		/* start udelay(5)ing past this count */
/* Issue a command to the SCB: wait for the previous command to be
 * accepted (cmd_lo reads back zero), optionally load the general
 * pointer, then write the command byte.  Returns -EAGAIN if the chip
 * never accepted the previous command. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	/* cuc_resume needs no general-pointer load */
	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
871
/* Take the next free Command Block, let cb_prepare() fill it in, link
 * it into the hardware's CB chain, and kick the Command Unit.
 * Returns 0 on success, -ENOSPC when this consumed the last free CB
 * (the command was still queued), -ENOMEM when no CB was available. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	/* that was the last free CB: signal -ENOSPC to the caller */
	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky. It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* no room left — request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
926
/* Read an MII register through the adapter's mdio_ctrl backend. */
static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
}

/* Write an MII register through the adapter's mdio_ctrl backend. */
static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	struct nic *nic = netdev_priv(netdev);

	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
}
939
940
/* The standard mdio_ctrl() backend for MII-compliant hardware: wait
 * for the MDI interface to go Ready, issue the read/write, then poll
 * for completion.  Returns the data read (or 0 on a Ready timeout —
 * there is no way to signal an error to the MII layer). */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/* We shouldn't write the MDI control register until the Ready
	 * bit shows True; and since manipulating it is a multi-step
	 * procedure, the whole sequence is done under mdio_lock. */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	/* Poll (up to ~2 ms) for the operation to complete */
	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
979
980
981static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
982 u32 addr,
983 u32 dir,
984 u32 reg,
985 u16 data)
986{
987 if ((reg == MII_BMCR) && (dir == mdi_write)) {
988 if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
989 u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
990 MII_ADVERTISE);
991
992
993
994
995
996 if (advert & ADVERTISE_100FULL)
997 data |= BMCR_SPEED100 | BMCR_FULLDPLX;
998 else if (advert & ADVERTISE_100HALF)
999 data |= BMCR_SPEED100;
1000 }
1001 }
1002 return mdio_ctrl_hw(nic, addr, dir, reg, data);
1003}
1004
1005
1006
1007
1008
1009
1010
1011static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
1012 u32 addr,
1013 u32 dir,
1014 u32 reg,
1015 u16 data)
1016{
1017
1018
1019
1020
1021 if (dir == mdi_read) {
1022 switch (reg) {
1023 case MII_BMCR:
1024
1025 return BMCR_ANENABLE |
1026 BMCR_FULLDPLX;
1027 case MII_BMSR:
1028 return BMSR_LSTATUS |
1029 BMSR_ANEGCAPABLE |
1030 BMSR_10FULL;
1031 case MII_ADVERTISE:
1032
1033 return ADVERTISE_10HALF |
1034 ADVERTISE_10FULL;
1035 default:
1036 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1037 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1038 dir == mdi_read ? "READ" : "WRITE",
1039 addr, reg, data);
1040 return 0xFFFF;
1041 }
1042 } else {
1043 switch (reg) {
1044 default:
1045 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1046 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1047 dir == mdi_read ? "READ" : "WRITE",
1048 addr, reg, data);
1049 return 0xFFFF;
1050 }
1051 }
1052}
1053static inline int e100_phy_supports_mii(struct nic *nic)
1054{
1055
1056
1057
1058 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
1059}
1060
1061static void e100_get_defaults(struct nic *nic)
1062{
1063 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
1064 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
1065
1066
1067 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
1068 if (nic->mac == mac_unknown)
1069 nic->mac = mac_82557_D100_A;
1070
1071 nic->params.rfds = rfds;
1072 nic->params.cbs = cbs;
1073
1074
1075 nic->tx_threshold = 0xE0;
1076
1077
1078 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
1079 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
1080
1081
1082 nic->blank_rfd.command = 0;
1083 nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
1084 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
1085
1086
1087 nic->mii.phy_id_mask = 0x1F;
1088 nic->mii.reg_num_mask = 0x1F;
1089 nic->mii.dev = nic->netdev;
1090 nic->mii.mdio_read = mdio_read;
1091 nic->mii.mdio_write = mdio_write;
1092}
1093
/* Build a cb_config command block: start from a baseline configuration
 * and adjust it for the PHY type, MAC revision, and current netdev
 * flags/features.  The byte values below index struct config. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=save */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1 byte, 1=3 bytes, 2=7 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;	/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1;		/* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
/* CPUSaver microcode tunables, patched into the ucode image by
 * e100_setup_ucode(): */
#define BUNDLESMALL 1		/* non-zero: bundle small frames too */
#define BUNDLEMAX (u16)6	/* max frames bundled per interrupt */
#define INTDELAY (u16)1536	/* interrupt delay ticks -- units per CPUSaver docs, TODO confirm */
1244
1245
/* Select, load, and validate the CPUSaver/workaround microcode for this
 * MAC revision.  Returns NULL when no ucode is needed, an ERR_PTR on a
 * fatal problem, or a usable firmware pointer (also cached in nic->fw
 * for re-initialization, e.g. resume, when request_firmware() cannot
 * be called). */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;
	bool required = false;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Select ucode by MAC revision.  The D102E image also carries
	 * hardware bug fixes, so it is required; the others only enable
	 * the optional CPUSaver feature and may be skipped. */
	if (nic->mac == mac_82559_D101M) {
		fw_name = FIRMWARE_D101M;
	} else if (nic->mac == mac_82559_D101S) {
		fw_name = FIRMWARE_D101S;
	} else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
		fw_name = FIRMWARE_D102E;
		required = true;
	} else {
		return NULL;
	}

	/* If the firmware has not previously been loaded, request it;
	 * otherwise reuse the cached copy (re-init path). */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		if (required) {
			netif_err(nic, probe, nic->netdev,
				  "Failed to load firmware \"%s\": %d\n",
				  fw_name, err);
			return ERR_PTR(err);
		} else {
			netif_info(nic, probe, nic->netdev,
				   "CPUSaver disabled. Needs \"%s\": %d\n",
				   fw_name, err);
			return NULL;
		}
	}

	/* Firmware should be precisely UCODE_SIZE 32-bit words plus
	 * three trailing bytes holding the patch offsets used by
	 * e100_setup_ucode(). */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is usable and will be retained for later use.
	 * Update the cached pointer. */
	nic->fw = fw;
	return fw;
}
1333
/* cb_prepare() callback that builds a cb_ucode command: copy the
 * microcode image into the CB and patch in the CPUSaver tunables. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	/* Not a real sk_buff: e100_load_ucode_wait() smuggles the
	 * firmware pointer in through the skb argument. */
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we don't want the destructor touching it */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings into the low 16 bits of the
	 * indicated ucode words. */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}
1362
/* Load the microcode (if any is needed) and wait for the chip to
 * complete the ucode command block.  Returns 0 on success or when no
 * ucode is required, a negative error otherwise. */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* NULL means no ucode is required; note PTR_ERR(NULL) == 0,
	 * so this also returns success in that case. */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
1402
/* cb_prepare() callback: load the netdev's MAC address into the chip
 * via an individual-address-setup command. */
static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

/* cb_prepare() callback: dump chip state into the shared dump buffer. */
static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}
1416
1417static int e100_phy_check_without_mii(struct nic *nic)
1418{
1419 u8 phy_type;
1420 int without_mii;
1421
1422 phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
1423
1424 switch (phy_type) {
1425 case NoSuchPhy:
1426 case I82503:
1427 case S80C24:
1428
1429
1430
1431
1432
1433
1434 netif_info(nic, probe, nic->netdev,
1435 "found MII-less i82503 or 80c24 or other PHY\n");
1436
1437 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1438 nic->mii.phy_id = 0;
1439
1440
1441
1442
1443
1444 without_mii = 1;
1445 break;
1446 default:
1447 without_mii = 0;
1448 break;
1449 }
1450 return without_mii;
1451}
1452
#define NCONFIG_AUTO_SWITCH 0x0080
/* National Semiconductor PHY congestion-control register and bits
 * (vendor register, aliased onto MII_RESV1) */
#define MII_NSC_CONG MII_RESV1
#define NSC_CONG_ENABLE 0x0100
#define NSC_CONG_TXREADY 0x0400
#define ADVERTISE_FC_SUPPORTED 0x0400
/* Locate and initialize the PHY.  Returns 0 on success (including the
 * MII-less emulated case) or -EAGAIN when no usable PHY is found.
 */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,...,31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* BMSR bits are latched; read twice for current state */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether this is one of the
		 * boards that work without an MII-accessible PHY and can
		 * be driven with an emulated one */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the discovered PHY: isolate all the other PHY addresses
	 * and (except for the 82552) explicitly un-isolate the chosen one */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/* 82552 is un-isolated only after the loop.
	 * NOTE(review): at this point `bmcr` holds whatever value was read
	 * last inside the loops above, not a fresh read of this PHY's BMCR
	 * -- presumably intentional for the 82552 workaround; confirm
	 * against the hardware spec before touching this ordering.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys (model field ignored via the mask) */
#define NCS_PHY_MODEL_MASK 0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
1550
/* Bring the adapter to a fully initialized state: reset, self-test,
 * PHY init, CU/RU base-address load, microcode load, configure, station
 * address, and stats dump-area setup.  The order matters to the
 * hardware; returns 0 or the errno of the first failing step.
 */
static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	/* Self-test sleeps, so skip it when called from atomic context */
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* Tell hardware where to DMA its statistics dump */
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
1583
/* cb setup routine: load the multicast address list into a multicast
 * setup command block.  The list is truncated at
 * E100_MAX_MULTICAST_ADDRS; overflow is handled separately by setting
 * multicast_all in e100_set_multicast_list().  @skb is unused.
 */
static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct netdev_hw_addr *ha;
	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	/* Hardware wants the byte count of the address block */
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == count)
			break;
		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
			ETH_ALEN);
	}
}
1600
1601static void e100_set_multicast_list(struct net_device *netdev)
1602{
1603 struct nic *nic = netdev_priv(netdev);
1604
1605 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1606 "mc_count=%d, flags=0x%04X\n",
1607 netdev_mc_count(netdev), netdev->flags);
1608
1609 if (netdev->flags & IFF_PROMISC)
1610 nic->flags |= promiscuous;
1611 else
1612 nic->flags &= ~promiscuous;
1613
1614 if (netdev->flags & IFF_ALLMULTI ||
1615 netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
1616 nic->flags |= multicast_all;
1617 else
1618 nic->flags &= ~multicast_all;
1619
1620 e100_exec_cb(nic, NULL, e100_configure);
1621 e100_exec_cb(nic, NULL, e100_multi);
1622}
1623
/* Harvest the adapter's statistics dump (DMA'd by the cuc_dump_reset
 * issued on the previous pass) into netdev and driver counters, then
 * kick off the next dump+reset.
 */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* The "completion" marker lives at a MAC-generation-dependent
	 * offset: older MACs dump fewer counters */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */
	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		nic->rx_short_frame_errors +=
			le32_to_cpu(s->rx_short_frame_errors);
		ns->rx_length_errors = nic->rx_short_frame_errors +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* Flow-control counters exist from 82558 D101 A4 on;
		 * TCO counters from 82559 D101M on */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	/* Start the next dump; results are picked up on the next pass */
	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}
1685
1686static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1687{
1688
1689
1690
1691 if (duplex == DUPLEX_HALF) {
1692 u32 prev = nic->adaptive_ifs;
1693 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1694
1695 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1696 (nic->tx_frames > min_frames)) {
1697 if (nic->adaptive_ifs < 60)
1698 nic->adaptive_ifs += 5;
1699 } else if (nic->tx_frames < min_frames) {
1700 if (nic->adaptive_ifs >= 5)
1701 nic->adaptive_ifs -= 5;
1702 }
1703 if (nic->adaptive_ifs != prev)
1704 e100_exec_cb(nic, NULL, e100_configure);
1705 }
1706}
1707
/* Periodic (2s) timer callback: report link changes, trigger a software
 * interrupt to flush any pending tx/rx work, refresh statistics, tune
 * adaptive IFS, and maintain hardware workarounds.  @data is the
 * struct nic pointer passed at timer setup.
 */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
1758
/* cb setup routine for transmit: fill in a TCB (transmit command block)
 * describing @skb's single linear buffer.
 * NOTE(review): the pci_map_single() result is not checked with
 * pci_dma_mapping_error() (unlike e100_rx_alloc_skb); a mapping failure
 * would hand the hardware a bad address.  Fixing it requires the
 * cb-prepare callback contract to report errors -- confirm and address
 * in a follow-up.
 */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;

	/* If the frame is marked no-FCS, tell the hardware not to
	 * append a CRC; otherwise make sure the bit is clear since
	 * nic->tx_command is shared across frames */
	if (unlikely(skb->no_fcs))
		cb->command |= __constant_cpu_to_le16(cb_tx_nc);
	else
		cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);

	/* interrupt every 16 frames to avoid running out of CBs */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
	skb_tx_timestamp(skb);
}
1786
1787static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1788 struct net_device *netdev)
1789{
1790 struct nic *nic = netdev_priv(netdev);
1791 int err;
1792
1793 if (nic->flags & ich_10h_workaround) {
1794
1795
1796
1797 if (e100_exec_cmd(nic, cuc_nop, 0))
1798 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1799 "exec cuc_nop failed\n");
1800 udelay(1);
1801 }
1802
1803 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1804
1805 switch (err) {
1806 case -ENOSPC:
1807
1808 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1809 "No space for CB\n");
1810 netif_stop_queue(netdev);
1811 break;
1812 case -ENOMEM:
1813
1814 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1815 "Out of Tx resources, returning skb\n");
1816 netif_stop_queue(netdev);
1817 return NETDEV_TX_BUSY;
1818 }
1819
1820 return NETDEV_TX_OK;
1821}
1822
/* Reclaim completed transmit command blocks: free skbs, unmap DMA,
 * update counters, and wake the queue if it was throttled.  Returns
 * non-zero when at least one frame was cleaned.
 */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		rmb(); /* read skb after status */
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			     cb->status);

		/* Non-transmit CBs (configure, iaaddr, ...) have no skb */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}
1865
/* Tear down the command-block ring: unmap and free any skbs still
 * attached, return the ring memory to the DMA pool, and reset the ring
 * bookkeeping so the CU will be (re)started on next use.
 */
static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		/* Walk until every CB has been accounted for */
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	/* Next command must (re)start the CU rather than resume */
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
1889
/* Allocate and link the circular command-block ring from the DMA pool.
 * Each CB gets prev/next pointers (host side) and a little-endian
 * `link` to the next CB's bus address (device side).  Returns 0 or
 * -ENOMEM.
 */
static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
				  &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;
	memset(nic->cbs, 0, count * sizeof(struct cb));

	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		/* Circular: last entry wraps back to the first */
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}
1919
1920static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1921{
1922 if (!nic->rxs) return;
1923 if (RU_SUSPENDED != nic->ru_running) return;
1924
1925
1926 if (!rx) rx = nic->rxs;
1927
1928
1929 if (rx->skb) {
1930 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1931 nic->ru_running = RU_RUNNING;
1932 }
1933}
1934
1935#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
/* Allocate an skb for one RFD slot, seed it with the blank RFD header,
 * map it for DMA, and link it into the hardware RFD chain via the
 * previous slot's `link` field.  Returns 0 or -ENOMEM.
 */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
1965
/* Indicate one received frame to the stack.  Returns 0 when a frame was
 * consumed, -EAGAIN when the NAPI budget is exhausted, or -ENODATA when
 * the RFD is not yet complete (no more received frames pending).
 */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;
	u16 fcs_pad = 0;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);
	rmb(); /* read size after status bit */

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the
		 * receiver is still running, check to see if it really
		 * stopped while we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size (low 14 bits; clamp defensively) */
	if (unlikely(dev->features & NETIF_F_RXFCS))
		fcs_pad = 4;
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
		nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	/* If we are receiving all frames, then don't bother
	 * checking the error bits -- deliver everything */
	if (unlikely(dev->features & NETIF_F_RXALL)) {
		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
			/* Received oversized frame, but keep it. */
			nic->rx_over_length_errors++;
		goto process_skb;
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
process_skb:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}
2064
/* NAPI receive path: indicate completed RFDs, refill the ring, move the
 * el (end-of-list) marker forward, and restart the RU if it suspended.
 * The el bit always sits on the before-last RFD so the hardware never
 * runs past buffers the driver still owns.
 */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we restarting the RU?  if so, the adapter has been
	 * suspended and we have to restart it once refill is done */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next round */
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last
		 * buffer.  This lets us update the next pointer on the
		 * last buffer without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching
		 * this buffer.  When the hardware hits the before-last
		 * buffer with el-bit and size of 0, it will RNR interrupt,
		 * the RU will go into the No Resources state.  It will not
		 * complete nor write to this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the
		 * old stopping point.  We must sync twice to get the
		 * proper ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		/* ack the rnr? */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
2141
2142static void e100_rx_clean_list(struct nic *nic)
2143{
2144 struct rx *rx;
2145 unsigned int i, count = nic->params.rfds.count;
2146
2147 nic->ru_running = RU_UNINITIALIZED;
2148
2149 if (nic->rxs) {
2150 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2151 if (rx->skb) {
2152 pci_unmap_single(nic->pdev, rx->dma_addr,
2153 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2154 dev_kfree_skb(rx->skb);
2155 }
2156 }
2157 kfree(nic->rxs);
2158 nic->rxs = NULL;
2159 }
2160
2161 nic->rx_to_use = nic->rx_to_clean = NULL;
2162}
2163
/* Build the circular RFD ring: allocate the rx descriptor array, attach
 * a mapped skb to every slot, and place the el (end-of-list) marker on
 * the before-last RFD.  Leaves the RU in the SUSPENDED state, ready for
 * e100_start_receiver().  Returns 0 or -ENOMEM.
 */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		/* Circular: last slot wraps back to the first */
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer
	 * without worrying about hardware touching it.
	 * We set the size to 0 to prevent hardware from touching this
	 * buffer.  When the hardware hits the before-last buffer with
	 * el-bit and size of 0, it will RNR interrupt, the RU will go
	 * into the No Resources state.  It will not complete nor write
	 * to this buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
2203
/* Interrupt handler: ack all pending interrupt causes, note an RNR
 * (receiver ran out of resources), and hand the rest of the work to
 * NAPI with interrupts masked.
 */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	/* Not our interrupt (shared line), or hardware is gone */
	if (stat_ack == stat_ack_not_ours ||
	   stat_ack == stat_ack_not_present)
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if (likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
2231
2232static int e100_poll(struct napi_struct *napi, int budget)
2233{
2234 struct nic *nic = container_of(napi, struct nic, napi);
2235 unsigned int work_done = 0;
2236
2237 e100_rx_clean(nic, &work_done, budget);
2238 e100_tx_clean(nic);
2239
2240
2241 if (work_done < budget) {
2242 napi_complete(napi);
2243 e100_enable_irq(nic);
2244 }
2245
2246 return work_done;
2247}
2248
2249#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: service the device with interrupts disabled (used by
 * netconsole and similar).  Runs the interrupt handler and tx reclaim
 * directly instead of through NAPI scheduling.
 */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
2259#endif
2260
2261static int e100_set_mac_address(struct net_device *netdev, void *p)
2262{
2263 struct nic *nic = netdev_priv(netdev);
2264 struct sockaddr *addr = p;
2265
2266 if (!is_valid_ether_addr(addr->sa_data))
2267 return -EADDRNOTAVAIL;
2268
2269 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2270 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2271
2272 return 0;
2273}
2274
2275static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2276{
2277 if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2278 return -EINVAL;
2279 netdev->mtu = new_mtu;
2280 return 0;
2281}
2282
2283static int e100_asf(struct nic *nic)
2284{
2285
2286 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2287 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2288 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2289 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
2290}
2291
/* Bring the interface up: allocate rx/tx rings, initialize hardware,
 * start the receiver and watchdog, hook the IRQ, and enable NAPI and
 * interrupts.  Unwinds in reverse order via gotos on failure.
 */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
2323
/* Bring the interface down: quiesce NAPI and the tx queue first, then
 * reset hardware, release the IRQ and watchdog, and free both rings.
 */
static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
2336
/* ndo_tx_timeout hook.  Runs in a context that cannot sleep, so the
 * actual recovery (which needs rtnl_lock) is deferred to a workqueue.
 */
static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
2345
/* Deferred tx-timeout recovery: full down/up cycle under rtnl_lock,
 * but only if the interface is still running.
 */
static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}
2361
/* Offline loopback self-test: set up rings, put the MAC or PHY in
 * loopback, transmit one all-0xFF frame, and verify the same bytes come
 * back on the receive ring.  Returns 0 on success, -EAGAIN on data
 * mismatch, or a setup errno.  Caller must have taken the device down.
 */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */
	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* Give the frame time to loop back */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* Payload follows the RFD header in the receive buffer */
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
2417
2418#define MII_LED_CONTROL 0x1B
2419#define E100_82552_LED_OVERRIDE 0x19
2420#define E100_82552_LED_ON 0x000F
2421#define E100_82552_LED_OFF 0x000A
2422
/* ethtool get_settings hook: delegate to the generic MII helper. */
static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}
2428
/* ethtool set_settings hook: reset the PHY, apply the new MII settings,
 * and push a configure block so the MAC matches the new link setup.
 */
static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}
2440
/* ethtool get_drvinfo hook: report driver name, version, and PCI bus
 * location.
 */
static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev),
		sizeof(info->bus_info));
}
2450
2451#define E100_PHY_REGS 0x1C
2452static int e100_get_regs_len(struct net_device *netdev)
2453{
2454 struct nic *nic = netdev_priv(netdev);
2455 return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
2456}
2457
2458static void e100_get_regs(struct net_device *netdev,
2459 struct ethtool_regs *regs, void *p)
2460{
2461 struct nic *nic = netdev_priv(netdev);
2462 u32 *buff = p;
2463 int i;
2464
2465 regs->version = (1 << 24) | nic->pdev->revision;
2466 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2467 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2468 ioread16(&nic->csr->scb.status);
2469 for (i = E100_PHY_REGS; i >= 0; i--)
2470 buff[1 + E100_PHY_REGS - i] =
2471 mdio_read(netdev, nic->mii.phy_id, i);
2472 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2473 e100_exec_cb(nic, NULL, e100_dump);
2474 msleep(10);
2475 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2476 sizeof(nic->mem->dump_buf));
2477}
2478
/* ethtool get_wol hook: only magic-packet wake is supported, and only
 * on 82558 D101 A4 and newer MACs.
 */
static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}
2485
/* ethtool set_wol hook: accept only WAKE_MAGIC (or nothing), record it
 * in the driver flags and device wakeup state, and reconfigure.
 */
static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);

	/* Push the new wake setting into the hardware config */
	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}
2505
/* ethtool get_msglevel hook: return the driver's message-enable mask. */
static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}
2511
/* ethtool set_msglevel hook: set the driver's message-enable mask. */
static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}
2517
/* ethtool nway_reset hook: restart autonegotiation via the MII layer. */
static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}
2523
/* ethtool get_link hook: report link state from the MII layer. */
static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}
2529
/* ethtool get_eeprom_len hook: eeprom_wc is the word count, so the
 * byte length is twice that.
 */
static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}
2535
2536#define E100_EEPROM_MAGIC 0x1234
/* ethtool get_eeprom hook: copy from the driver's cached EEPROM image.
 * The ethtool core validates offset/len against get_eeprom_len().
 */
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}
2547
/* ethtool set_eeprom hook: update the cached EEPROM image, then write
 * the affected word range (rounded out to whole words) back to the
 * physical EEPROM.
 */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	/* offset/len are in bytes; save in 16-bit words, +1 word to
	 * cover a byte range that straddles a word boundary */
	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
2561
/* ethtool get_ringparam hook: report current and maximum RFD (rx) and
 * CB (tx) ring sizes from the driver parameters.
 */
static void e100_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
}
2574
/* ethtool set_ringparam hook: clamp the requested rx/tx ring sizes to
 * the driver's min/max and restart the interface (if running) so the
 * rings are reallocated with the new sizes.
 */
static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	/* Mini and jumbo rings are not supported by this hardware */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
2598
/* Self-test names reported to ethtool; the order must match the data[]
 * indices filled in by e100_diag_test(). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};
2606#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
2607
/* ethtool self_test hook: always run the link and EEPROM checks; for an
 * offline test also run self-test and both loopback tests, saving and
 * restoring the link settings around the disruptive part.  A zero in
 * data[i] means the corresponding test passed.
 */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* Give autoneg time to settle before returning to the user */
	msleep_interruptible(4 * 1000);
}
2640
2641static int e100_set_phys_id(struct net_device *netdev,
2642 enum ethtool_phys_id_state state)
2643{
2644 struct nic *nic = netdev_priv(netdev);
2645 enum led_state {
2646 led_on = 0x01,
2647 led_off = 0x04,
2648 led_on_559 = 0x05,
2649 led_on_557 = 0x07,
2650 };
2651 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
2652 MII_LED_CONTROL;
2653 u16 leds = 0;
2654
2655 switch (state) {
2656 case ETHTOOL_ID_ACTIVE:
2657 return 2;
2658
2659 case ETHTOOL_ID_ON:
2660 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
2661 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2662 break;
2663
2664 case ETHTOOL_ID_OFF:
2665 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
2666 break;
2667
2668 case ETHTOOL_ID_INACTIVE:
2669 break;
2670 }
2671
2672 mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
2673 return 0;
2674}
2675
/* Names of the ethtool statistics.  The first E100_NET_STATS_LEN entries
 * mirror the generic net_device stats copied in e100_get_ethtool_stats();
 * the rest are driver-private counters, in the same order they are
 * appended there. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
/* count of generic net_device stats at the head of the table above */
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
2691
2692static int e100_get_sset_count(struct net_device *netdev, int sset)
2693{
2694 switch (sset) {
2695 case ETH_SS_TEST:
2696 return E100_TEST_LEN;
2697 case ETH_SS_STATS:
2698 return E100_STATS_LEN;
2699 default:
2700 return -EOPNOTSUPP;
2701 }
2702}
2703
/* ethtool hook: copy driver statistics into data[], in exactly the order
 * named by e100_gstrings_stats. */
static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	/* The first E100_NET_STATS_LEN slots mirror netdev->stats.
	 * NOTE(review): this treats the stats struct as a flat array of
	 * unsigned long -- it assumes field order and that every member is
	 * an unsigned long; confirm against struct net_device_stats before
	 * touching. */
	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	/* driver-private counters follow, order matching the string table */
	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}
2724
2725static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2726{
2727 switch (stringset) {
2728 case ETH_SS_TEST:
2729 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2730 break;
2731 case ETH_SS_STATS:
2732 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2733 break;
2734 }
2735}
2736
/* ethtool operations table wired up in e100_probe via SET_ETHTOOL_OPS. */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.set_phys_id		= e100_set_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
	.get_ts_info		= ethtool_op_get_ts_info,
};
2761
2762static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2763{
2764 struct nic *nic = netdev_priv(netdev);
2765
2766 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2767}
2768
2769static int e100_alloc(struct nic *nic)
2770{
2771 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2772 &nic->dma_addr);
2773 return nic->mem ? 0 : -ENOMEM;
2774}
2775
2776static void e100_free(struct nic *nic)
2777{
2778 if (nic->mem) {
2779 pci_free_consistent(nic->pdev, sizeof(struct mem),
2780 nic->mem, nic->dma_addr);
2781 nic->mem = NULL;
2782 }
2783}
2784
2785static int e100_open(struct net_device *netdev)
2786{
2787 struct nic *nic = netdev_priv(netdev);
2788 int err = 0;
2789
2790 netif_carrier_off(netdev);
2791 if ((err = e100_up(nic)))
2792 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2793 return err;
2794}
2795
/* net_device stop (ifdown): take the adapter down. */
static int e100_close(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_down(nic);
	return 0;
}
2801
2802static int e100_set_features(struct net_device *netdev,
2803 netdev_features_t features)
2804{
2805 struct nic *nic = netdev_priv(netdev);
2806 netdev_features_t changed = features ^ netdev->features;
2807
2808 if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
2809 return 0;
2810
2811 netdev->features = features;
2812 e100_exec_cb(nic, NULL, e100_configure);
2813 return 0;
2814}
2815
/* net_device operations table wired up in e100_probe. */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
	.ndo_set_features	= e100_set_features,
};
2831
/* PCI probe: allocate, initialize and register a net_device for a
 * recognized 8255x adapter.  Tear-down on failure happens in reverse
 * order via the err_out_* labels at the bottom. */
static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;

	/* optional receive features: FCS passthrough, no-FCS tx, rx-all */
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->netdev_ops = &e100_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	/* temporary name (PCI slot id) for early log messages; replaced
	 * with "eth%d" just before register_netdev below */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;	/* module param "debug" */
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	/* BAR 1 is the I/O-mapped CSR window, BAR 0 the memory-mapped one */
	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	/* driver_data in the id table flags ICH variants */
	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* D100 MAC can't handle VLAN-sized frames at normal MTU */
	if (nic->mac < mac_82558_D101_A4)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case the device is
	 * in some funky state with an interrupt pending */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	/* MAC address lives at the start of the EEPROM image */
	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	    (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
		goto err_out_free;
	}
	/* NOTE(review): cbs_pool creation is not checked for NULL here;
	 * presumably later allocations from the pool fail gracefully --
	 * confirm in e100_up/e100_alloc_cbs. */
	nic->cbs_pool = pci_pool_create(netdev->name,
			   nic->pdev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}
2983
/* PCI remove: undo e100_probe in reverse order. */
static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);	/* stop new traffic first */
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
3000
/* 82552 PHY SmartSpeed register and its reverse-autoneg control bits */
#define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */
/* Common shutdown path for suspend/poweroff: bring the interface down,
 * save PCI state, and report via *enable_wake whether wake-up (WoL magic
 * packet or ASF) should stay armed. */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* NOTE(review): bitwise '|' (not '||') -- both sides are always
	 * evaluated; presumably intentional so e100_asf() always runs.
	 * Confirm before "fixing". */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation for the 82552 PHY */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}
3032
3033static int __e100_power_off(struct pci_dev *pdev, bool wake)
3034{
3035 if (wake)
3036 return pci_prepare_to_sleep(pdev);
3037
3038 pci_wake_from_d3(pdev, false);
3039 pci_set_power_state(pdev, PCI_D3hot);
3040
3041 return 0;
3042}
3043
3044#ifdef CONFIG_PM
3045static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
3046{
3047 bool wake;
3048 __e100_shutdown(pdev, &wake);
3049 return __e100_power_off(pdev, wake);
3050}
3051
3052static int e100_resume(struct pci_dev *pdev)
3053{
3054 struct net_device *netdev = pci_get_drvdata(pdev);
3055 struct nic *nic = netdev_priv(netdev);
3056
3057 pci_set_power_state(pdev, PCI_D0);
3058 pci_restore_state(pdev);
3059
3060 pci_enable_wake(pdev, 0, 0);
3061
3062
3063 if (nic->phy == phy_82552_v) {
3064 u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
3065 E100_82552_SMARTSPEED);
3066
3067 mdio_write(netdev, nic->mii.phy_id,
3068 E100_82552_SMARTSPEED,
3069 smartspeed & ~(E100_82552_REV_ANEG));
3070 }
3071
3072 netif_device_attach(netdev);
3073 if (netif_running(netdev))
3074 e100_up(nic);
3075
3076 return 0;
3077}
3078#endif
3079
/* System shutdown/reboot hook: quiesce the device; only transition to a
 * low-power state when the machine is actually powering off. */
static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}
3087
3088
3089
3090
3091
3092
3093
/* AER callback: a PCI channel error was detected.  Detach and quiesce
 * the device; ask the core for a slot reset unless the failure is
 * permanent. */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
3111
3112
3113
3114
3115
3116
3117
/* AER callback: the PCI slot has been reset.  Re-enable the device and
 * restart it from scratch. */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset; let function 0 do it */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
3137
3138
3139
3140
3141
3142
3143
3144
/* AER callback: error recovery is complete, resume normal operation. */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		/* kick the watchdog so link state is re-evaluated promptly */
		mod_timer(&nic->watchdog, jiffies);
	}
}
3159
/* PCI AER (advanced error reporting) recovery callbacks. */
static const struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
3165
/* PCI driver descriptor registered from e100_init_module(). */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
3179
3180static int __init e100_init_module(void)
3181{
3182 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3183 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3184 pr_info("%s\n", DRV_COPYRIGHT);
3185 }
3186 return pci_register_driver(&e100_driver);
3187}
3188
/* Module exit: unregister the PCI driver (triggers e100_remove per dev). */
static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}
3193
/* Register the module's load/unload entry points. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);
3196