1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
151
152#include <linux/hardirq.h>
153#include <linux/interrupt.h>
154#include <linux/module.h>
155#include <linux/moduleparam.h>
156#include <linux/kernel.h>
157#include <linux/types.h>
158#include <linux/sched.h>
159#include <linux/slab.h>
160#include <linux/delay.h>
161#include <linux/init.h>
162#include <linux/pci.h>
163#include <linux/dma-mapping.h>
164#include <linux/dmapool.h>
165#include <linux/netdevice.h>
166#include <linux/etherdevice.h>
167#include <linux/mii.h>
168#include <linux/if_vlan.h>
169#include <linux/skbuff.h>
170#include <linux/ethtool.h>
171#include <linux/string.h>
172#include <linux/firmware.h>
173#include <linux/rtnetlink.h>
174#include <asm/unaligned.h>
175
176
/* Driver identity strings reported through ethtool / modinfo. */
#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"

#define E100_WATCHDOG_PERIOD	(2 * HZ)	/* link/stats watchdog interval */
#define E100_NAPI_WEIGHT	16		/* max RX frames per NAPI poll */

/* CPUSaver microcode images loaded via the firmware loader */
#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

/* Module parameters (all read-only after load; perm 0 hides them in sysfs) */
static int debug = 3;			/* default message level */
static int eeprom_bad_csum_allow = 0;	/* proceed despite EEPROM csum error */
static int use_io = 0;			/* force port I/O instead of MMIO */
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
207
/* PCI match entry for an Intel 8255x part.  The trailing driver_data
 * value ("ich") is a platform quirk level consumed by the probe path. */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }

/* All supported 8255x device IDs; last element must stay zeroed. */
static const struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);
257
/* MAC revision, taken from the PCI revision ID (see e100_get_defaults);
 * used for per-silicon feature gating throughout the driver. */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

/* PHY identity, assembled from the MII PHYSID1/PHYSID2 registers
 * (see e100_phy_init: id_hi << 16 | id_lo). */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};
286
287
/* Memory-mapped Control/Status Register layout of the 8255x.  Accessed
 * only through ioread*/iowrite* on nic->csr; field order/width mirrors
 * the hardware register file and must not be changed. */
struct csr {
	struct {
		u8 status;	/* SCB status word (RU/CU state) */
		u8 stat_ack;	/* interrupt status / write-1-to-ack */
		u8 cmd_lo;	/* RU/CU command opcode */
		u8 cmd_hi;	/* interrupt mask control */
		u32 gen_ptr;	/* general pointer (DMA address operand) */
	} scb;			/* System Control Block */
	u32 port;		/* reset/self-test port commands */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;	/* bit-banged serial EEPROM interface */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MDI (MII management) control */
	u32 rx_dma_count;
};
303
/* Receive Unit status bits within scb.status */
enum scb_status {
	rus_no_res = 0x08,	/* RU out of resources */
	rus_ready  = 0x10,	/* RU ready/running */
	rus_mask   = 0x3C,
};

/* Software-tracked Receive Unit state */
enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING	 = 1,
	RU_UNINITIALIZED = -1,
};

/* Interrupt cause bits in scb.stat_ack (write 1 to acknowledge) */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,	/* RU left ready state */
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

/* Interrupt mask control values for scb.cmd_hi */
enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,	/* software-generated interrupt */
};

/* RU/CU opcodes written to scb.cmd_lo */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

/* Completion signatures the device writes after a stats dump */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

/* Commands written to the PORT register */
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

/* Bit-bang lines of the serial EEPROM interface (eeprom_ctrl_lo) */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* serial clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (to EEPROM) */
	eedo = 0x08,	/* data out (from EEPROM) */
};

/* MDI control register command/status bits */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

/* Serial EEPROM opcodes */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,	/* write disable */
	op_ewen  = 0x13,	/* write enable */
};
376
/* Word offsets of interest within the EEPROM image (nic->eeprom[]) */
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

/* PHY/transceiver type codes stored at eeprom_phy_iface */
enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,	/* wake-on-LAN capable */
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

/* Command-block status bits written back by the device */
enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/* Command-block opcodes and control bits (cb->command) */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,	/* load individual (MAC) address */
	cb_config = 0x0002,
	cb_multi  = 0x0003,	/* load multicast list */
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,	/* load microcode */
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,	/* TX flexible mode */
	cb_tx_nc  = 0x0010,
	cb_cid    = 0x1f00,	/* CNA interrupt delay */
	cb_i      = 0x2000,	/* interrupt on completion */
	cb_s      = 0x4000,	/* suspend after this block */
	cb_el     = 0x8000,	/* end of list */
};
434
/* Receive Frame Descriptor — hardware layout, little-endian on the wire;
 * one RFD precedes each receive buffer handed to the device. */
struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;		/* bus address of next RFD */
	__le32 rbd;
	__le16 actual_size;	/* bytes received (low 14 bits) */
	__le16 size;		/* buffer capacity */
};

/* Host-side bookkeeping for one receive buffer (not seen by hardware) */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
449
/* Bitfield allocation order is implementation-defined and differs between
 * big- and little-endian ABIs; X() swaps the declaration order so that
 * struct config matches the device's byte layout either way. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif

/* Device configure command block body (cb_config).  Field-per-bit map of
 * the 8255x configuration bytes; see e100_configure for the values used. */
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};
493
#define E100_MAX_MULTICAST_ADDRS	64
/* Multicast setup command block body (cb_multi) */
struct multi {
	__le16 count;	/* total bytes of addresses that follow */
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Microcode image size in 32-bit words */
#define UCODE_SIZE			134
/* Command Block: the shared host/device descriptor for every CU command.
 * The leading status/command/link/u fields are hardware layout; the
 * trailing next/prev/dma_addr/skb fields are host-only bookkeeping. */
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;		/* bus address of next CB */
	union {
		u8 iaaddr[ETH_ALEN];		/* cb_iaaddr */
		__le32 ucode[UCODE_SIZE];	/* cb_ucode */
		struct config config;		/* cb_config */
		struct multi multi;		/* cb_multi */
		struct {			/* cb_tx */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;	/* cb_dump */
	} u;
	struct cb *next, *prev;	/* host-side list linkage */
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};
528
/* Loopback mode as programmed into config->loopback (2-bit field) */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

/* Statistics dump area the device DMAs into (cuc_dump_stats); layout and
 * order are fixed by hardware, values are little-endian. */
struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;	/* dump-complete signature (enum cuc_dump) */
};

/* Coherent DMA scratch area shared with the device */
struct mem {
	struct {
		u32 signature;	/* written by device on self-test */
		u32 result;	/* 0 on success */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

/* Allowed range and current setting for a ring-size parameter */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;	/* receive ring */
	struct param_range cbs;		/* command (TX) ring */
};
564
/* Per-adapter state.  Fields are grouped (and cacheline-aligned) by how
 * hot they are on the RX/TX fast paths. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	/* Indirect MDI access: swapped out for PHY-specific or emulated
	 * implementations (see mdio_ctrl_hw and friends) */
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	/* RX ring state */
	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;	/* template copied into every fresh RFD */
	enum ru_state ru_running;

	/* TX / command ring state */
	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;	/* serializes SCB command register access */
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;	/* cuc_start first, cuc_resume after */
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;	/* next free CB */
	struct cb *cb_to_send;	/* next CB to hand to the CU */
	struct cb *cb_to_clean;	/* next CB awaiting completion */
	__le16 tx_command;	/* precomputed command word for TX CBs */
	/* End: frequently used values */

	enum {
		ich                = (1 << 0),	/* ICH chipset quirk */
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;	/* coherent DMA scratch (selftest/stats/dump) */
	dma_addr_t dma_addr;

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;		/* EEPROM size in 16-bit words */
	__le16 eeprom[256];	/* cached EEPROM contents */
	spinlock_t mdio_lock;
	const struct firmware *fw;	/* cached CPUSaver microcode */
};
633
/* Flush posted PCI writes to the device by issuing a harmless read of
 * the SCB status register. */
static inline void e100_write_flush(struct nic *nic)
{
	(void)ioread8(&nic->csr->scb.status);
}
640
/* Unmask device interrupts.  cmd_lock serializes access to the shared
 * SCB command registers. */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
650
/* Mask all device interrupts (counterpart of e100_enable_irq). */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
660
/* Bring the adapter to a known quiescent state: selective reset first to
 * halt the RU/CU cleanly, then a full software reset.  Each PORT command
 * is flushed and followed by a settle delay before touching the device
 * again. */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of stuck state */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
675
/* Run the device's built-in self-test.  The device DMAs its verdict into
 * nic->mem->selftest; we pre-seed the fields so both "reported failure"
 * and "never wrote anything" (timeout) are distinguishable.
 * Returns 0 on pass, -ETIMEDOUT on failure or timeout. */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
708
709static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
710{
711 u32 cmd_addr_data[3];
712 u8 ctrl;
713 int i, j;
714
715
716 cmd_addr_data[0] = op_ewen << (addr_len - 2);
717 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
718 le16_to_cpu(data);
719 cmd_addr_data[2] = op_ewds << (addr_len - 2);
720
721
722 for (j = 0; j < 3; j++) {
723
724
725 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
726 e100_write_flush(nic); udelay(4);
727
728 for (i = 31; i >= 0; i--) {
729 ctrl = (cmd_addr_data[j] & (1 << i)) ?
730 eecs | eedi : eecs;
731 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
732 e100_write_flush(nic); udelay(4);
733
734 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
735 e100_write_flush(nic); udelay(4);
736 }
737
738 msleep(10);
739
740
741 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
742 e100_write_flush(nic); udelay(4);
743 }
744};
745
746
747static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
748{
749 u32 cmd_addr_data;
750 u16 data = 0;
751 u8 ctrl;
752 int i;
753
754 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
755
756
757 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
758 e100_write_flush(nic); udelay(4);
759
760
761 for (i = 31; i >= 0; i--) {
762 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
763 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
764 e100_write_flush(nic); udelay(4);
765
766 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
767 e100_write_flush(nic); udelay(4);
768
769
770
771 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
772 if (!(ctrl & eedo) && i > 16) {
773 *addr_len -= (i - 16);
774 i = 17;
775 }
776
777 data = (data << 1) | (ctrl & eedo ? 1 : 0);
778 }
779
780
781 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
782 e100_write_flush(nic); udelay(4);
783
784 return cpu_to_le16(data);
785};
786
787
/* Load entire EEPROM image into driver cache (nic->eeprom[]) and verify
 * its checksum.  The sum of all words, including the checksum word, must
 * equal 0xBABA.  Returns 0 on success, -EAGAIN on a bad checksum unless
 * the eeprom_bad_csum_allow module parameter overrides. */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
812
813
/* Write @count words of the cached EEPROM image, starting at word
 * @start, back to the device, then recompute and write the checksum
 * word so the sum of all words remains 0xBABA.
 * Returns 0 on success, -EINVAL if the range reaches the checksum word
 * or beyond. */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
838
#define E100_WAIT_SCB_TIMEOUT	20000	/* poll iterations before giving up */
#define E100_WAIT_SCB_FAST	20	/* spin this many times before sleeping */
/* Issue one SCB command to the device: wait for the previous command to
 * be accepted (cmd_lo reads back 0), optionally load the general pointer,
 * then write the opcode.  cuc_resume skips the pointer load.
 * Returns 0 on success, -EAGAIN if the SCB never became free. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* after a short busy-wait, back off with udelay */
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
871
/* Take the next free command block, let @cb_prepare fill its body, then
 * kick the Command Unit to process any blocks not yet handed over.
 * Returns 0 on success, -ENOMEM if no CB is free, -ENOSPC if this CB was
 * the last free one (callers use this to stop the TX queue), or the
 * error from cb_prepare.
 * NOTE(review): if cb_prepare fails, the CB stays consumed (cbs_avail
 * already decremented, cb_to_use advanced) — presumably reclaimed later
 * by the clean path; confirm against e100_tx_clean. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	err = cb_prepare(nic, cb, skb);
	if (err)
		goto err_unlock;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	/* Order is important otherwise we'll be in a race with the hardware:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	dma_wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* No more room; force a TX reset via timeout */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			/* after the first start, resume is sufficient */
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
929
930static int mdio_read(struct net_device *netdev, int addr, int reg)
931{
932 struct nic *nic = netdev_priv(netdev);
933 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
934}
935
936static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
937{
938 struct nic *nic = netdev_priv(netdev);
939
940 nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
941}
942
943
/* Perform an MDI (MII management) register access through the MDI
 * control register: wait for ready, issue the command word, then poll
 * for completion.  mdio_lock serializes the non-atomic ready/issue/poll
 * sequence.  Returns the 16-bit data (reads) or echoed data (writes);
 * returns 0 if the interface never became ready. */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/* Wait until any in-flight MDI cycle has finished */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	/* Command word: PHY address, register, direction and write data */
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	/* Poll for completion of our own cycle */
	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
982
983
/* MDI access wrapper for the 82552 PHY: when software requests
 * autonegotiation via BMCR, fold the current 100 Mb/s advertisement
 * into explicit speed/duplex bits before passing the write down to the
 * generic hardware accessor. */
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
				 u32 addr,
				 u32 dir,
				 u32 reg,
				 u16 data)
{
	/* Only intercept autoneg-enabling writes to the control register */
	if ((reg == MII_BMCR) && (dir == mdi_write)) {
		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
							MII_ADVERTISE);

			/* Workaround: mirror the advertised abilities into
			 * the forced speed/duplex bits as well */
			if (advert & ADVERTISE_100FULL)
				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				data |= BMCR_SPEED100;
		}
	}
	return mdio_ctrl_hw(nic, addr, dir, reg, data);
}
1007
1008
1009
1010
1011
1012
1013
1014static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
1015 u32 addr,
1016 u32 dir,
1017 u32 reg,
1018 u16 data)
1019{
1020
1021
1022
1023
1024 if (dir == mdi_read) {
1025 switch (reg) {
1026 case MII_BMCR:
1027
1028 return BMCR_ANENABLE |
1029 BMCR_FULLDPLX;
1030 case MII_BMSR:
1031 return BMSR_LSTATUS |
1032 BMSR_ANEGCAPABLE |
1033 BMSR_10FULL;
1034 case MII_ADVERTISE:
1035
1036 return ADVERTISE_10HALF |
1037 ADVERTISE_10FULL;
1038 default:
1039 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1040 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1041 dir == mdi_read ? "READ" : "WRITE",
1042 addr, reg, data);
1043 return 0xFFFF;
1044 }
1045 } else {
1046 switch (reg) {
1047 default:
1048 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1049 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1050 dir == mdi_read ? "READ" : "WRITE",
1051 addr, reg, data);
1052 return 0xFFFF;
1053 }
1054 }
1055}
1056static inline int e100_phy_supports_mii(struct nic *nic)
1057{
1058
1059
1060
1061 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
1062}
1063
/* Establish software defaults before the first hardware configure:
 * MAC revision, ring-size parameters, TX command template, the blank
 * RFD template, and the MII library hooks. */
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is the PCI revision ID; ICH parts all report as D101M */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Default TX threshold: 0xE0 = transmit when whole frame in FIFO */
	nic->tx_threshold = 0xE0;

	/* Newer MACs use a CNA interrupt delay instead of per-CB interrupt */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for RFDs inserted into the RX ring */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);	/* no RBD */
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);

	/* MII setup for the generic mii library */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
1096
/* cb_prepare callback that fills a command block with the device
 * configure command (cb_config).  Starts from a zeroed struct config,
 * applies baseline values, then layers on mode-dependent adjustments
 * (promiscuous/loopback, RXFCS/RXALL features, WoL, per-MAC features).
 * Always returns 0. */
static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	/* Baseline configuration */
	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=save */
	config->tx_underrun_retry = 0x3;	/* retries on underrun */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1 byte, 1=3, 2=7, 3=15 */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;	/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* Per-MAC-revision features */
	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1;		/* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
		     c + 0);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
		     c + 8);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
		     c + 16);
	return 0;
}
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
/* CPUSaver microcode tunables patched into the loaded image
 * (see e100_setup_ucode). */
#define BUNDLESMALL 1		/* also bundle small (< min_size) frames */
#define BUNDLEMAX (u16)6	/* max frames bundled per interrupt */
#define INTDELAY (u16)1536	/* interrupt delay, in device time units */
1245
1246
/* Select and load the CPUSaver microcode image for this MAC revision.
 * Returns NULL when no microcode applies (or an optional image is
 * unavailable), an ERR_PTR on hard failure, or the validated firmware.
 * The firmware is cached in nic->fw so resume paths don't re-request it. */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;
	bool required = false;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision.
	 * For the 82551_F and 82551_10 the microcode is required, for the
	 * others it only enables the CPUSaver feature. */
	if (nic->mac == mac_82559_D101M) {
		fw_name = FIRMWARE_D101M;
	} else if (nic->mac == mac_82559_D101S) {
		fw_name = FIRMWARE_D101S;
	} else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
		fw_name = FIRMWARE_D102E;
		required = true;
	} else { /* No ucode on other devices */
		return NULL;
	}

	/* Only request firmware once (it may be cached from a prior load) */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		if (required) {
			netif_err(nic, probe, nic->netdev,
				  "Failed to load firmware \"%s\": %d\n",
				  fw_name, err);
			return ERR_PTR(err);
		} else {
			netif_info(nic, probe, nic->netdev,
				   "CPUSaver disabled. Needs \"%s\": %d\n",
				   fw_name, err);
			return NULL;
		}
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use. Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}
1334
/* cb_prepare callback for the microcode-load command block.  The skb
 * argument actually smuggles the firmware pointer (see the cast in
 * e100_load_ucode_wait).  Copies the image into the CB and patches the
 * tunable half-words at the offsets the firmware blob declares. */
static int e100_setup_ucode(struct nic *nic, struct cb *cb,
			    struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in the low half-words */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
	return 0;
}
1364
/* Load CPUSaver microcode (if any applies) and synchronously wait for
 * the device to report completion of the load command block.
 * Returns 0 on success or when no microcode is needed, a negative errno
 * otherwise.  (When e100_request_firmware returns NULL, PTR_ERR(NULL)
 * yields the intended 0.) */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
1404
/* cb_prepare callback: load the individual (MAC) address command block
 * from the netdev's current dev_addr. */
static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
	return 0;
}
1412
/* cb_prepare callback: ask the device to dump its internal registers
 * into the coherent dump_buf scratch area. */
static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
	return 0;
}
1420
/* Consult the EEPROM's PHY-interface word to decide whether this board
 * uses a known MII-less transceiver.  If so, install the software MII
 * emulation and return 1; otherwise return 0 (caller treats the missing
 * PHY as a fatal probe error). */
static int e100_phy_check_without_mii(struct nic *nic)
{
	u8 phy_type;
	int without_mii;

	/* transceiver type is in the upper byte of the phy-interface word */
	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;

	switch (phy_type) {
	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
	case I82503: /* Non-MII PHY; UNTESTED! */
	case S80C24: /* Non-MII PHY; tested and working */
		/* These PHYs need no MII access; fake just enough of one
		 * (fixed 10 Mb/s full duplex) for the MII library. */
		netif_info(nic, probe, nic->netdev,
			   "found MII-less i82503 or 80c24 or other PHY\n");

		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */

		without_mii = 1;
		break;
	default:
		without_mii = 0;
		break;
	}
	return without_mii;
}
1456
/* PHY-specific register bits used during PHY init */
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1	/* NSC congestion control reg */
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
/* Locate and initialize the PHY: probe all 32 MDIO addresses, isolate
 * the non-selected PHYs, and apply per-PHY quirks (NSC congestion
 * control, 82552 flow-control advertisement, MDI/MDI-X auto-switch).
 * Returns 0 on success (including the MII-less case) or -EAGAIN when
 * no known PHY answers.
 */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,...,31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* BMSR is read twice: link-status bits are latched */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* all-ones BMCR or all-zero BMCR+BMSR means nothing there */
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* No PHY answered: check whether this is one of the rare
		 * MII-less variants before declaring failure. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* MII-less: nothing more to set up */
		else {
			netif_err(nic, hw, nic->netdev,
				"Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Read the PHY identifier registers */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Isolate all PHYs except the selected one */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/* 82552 is un-isolated only after the loop.
	 * NOTE(review): bmcr here still holds the last value read inside
	 * the loop above — confirm this is the intended register image. */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* National Semiconductor TX PHY: disable congestion control */
#define NCS_PHY_MODEL_MASK 0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* this PHY needs its own tweaked mdio control routine */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* advertise flow control (pause) capability */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* reset the PHY so the new advertisement takes effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
1554
/* Bring the controller to an operational state: reset, optional
 * self-test, PHY init, CU/RU base-address loads, microcode download,
 * configure block, station address, and an initial stats dump/reset.
 * Interrupts are left disabled on success.
 */
static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	/* NOTE(review): trace message logged at error severity — looks
	 * over-loud for a normal path; confirm intent before changing. */
	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	/* self-test sleeps, so skip it when called from atomic context */
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* point the controller at the stats area, then kick a dump+reset */
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
1587
1588static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1589{
1590 struct net_device *netdev = nic->netdev;
1591 struct netdev_hw_addr *ha;
1592 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
1593
1594 cb->command = cpu_to_le16(cb_multi);
1595 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1596 i = 0;
1597 netdev_for_each_mc_addr(ha, netdev) {
1598 if (i == count)
1599 break;
1600 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
1601 ETH_ALEN);
1602 }
1603 return 0;
1604}
1605
1606static void e100_set_multicast_list(struct net_device *netdev)
1607{
1608 struct nic *nic = netdev_priv(netdev);
1609
1610 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1611 "mc_count=%d, flags=0x%04X\n",
1612 netdev_mc_count(netdev), netdev->flags);
1613
1614 if (netdev->flags & IFF_PROMISC)
1615 nic->flags |= promiscuous;
1616 else
1617 nic->flags &= ~promiscuous;
1618
1619 if (netdev->flags & IFF_ALLMULTI ||
1620 netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
1621 nic->flags |= multicast_all;
1622 else
1623 nic->flags &= ~multicast_all;
1624
1625 e100_exec_cb(nic, NULL, e100_configure);
1626 e100_exec_cb(nic, NULL, e100_multi);
1627}
1628
/* Harvest the controller's statistics dump area into netdev stats and
 * driver counters, then issue the next dump+reset so fresh numbers are
 * ready for the following call.
 */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* Older MACs dump a shorter stats area, so the completion word
	 * lands at a generation-dependent offset. */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* The device's stats dump takes a while to complete, so we always
	 * read the results of the command issued on the *previous* call;
	 * the completion marker tells us whether they are valid. */
	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		nic->rx_short_frame_errors +=
			le32_to_cpu(s->rx_short_frame_errors);
		/* length errors are recomputed, not accumulated */
		ns->rx_length_errors = nic->rx_short_frame_errors +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* flow-control / TCO counters only exist on newer MACs */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	/* Kick off the next dump; results will show up on a later call */
	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}
1690
1691static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1692{
1693
1694
1695
1696 if (duplex == DUPLEX_HALF) {
1697 u32 prev = nic->adaptive_ifs;
1698 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1699
1700 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1701 (nic->tx_frames > min_frames)) {
1702 if (nic->adaptive_ifs < 60)
1703 nic->adaptive_ifs += 5;
1704 } else if (nic->tx_frames < min_frames) {
1705 if (nic->adaptive_ifs >= 5)
1706 nic->adaptive_ifs -= 5;
1707 }
1708 if (nic->adaptive_ifs != prev)
1709 e100_exec_cb(nic, NULL, e100_configure);
1710 }
1711}
1712
/* Watchdog timer, re-armed every E100_WATCHDOG_PERIOD: reports link
 * transitions, generates a software interrupt to nudge stuck RX/TX
 * cleanup, refreshes stats, tunes adaptive IFS and applies workarounds.
 */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link checking */
	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software-generated interrupt to recover from rare RX buffer
	 * allocation failures.  The spinlock is needed because the SW
	 * interrupt generation bit shares a register with the interrupt
	 * mask bit, and we must not re-enable interrupts by accident. */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to work around a 557 lock-up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* SW workaround for ICH[x] 10Mbps/half-duplex Tx hang */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
1763
1764static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
1765 struct sk_buff *skb)
1766{
1767 dma_addr_t dma_addr;
1768 cb->command = nic->tx_command;
1769
1770 dma_addr = pci_map_single(nic->pdev,
1771 skb->data, skb->len, PCI_DMA_TODEVICE);
1772
1773 if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
1774 dev_kfree_skb_any(skb);
1775 skb = NULL;
1776 return -ENOMEM;
1777 }
1778
1779
1780
1781
1782
1783 if (unlikely(skb->no_fcs))
1784 cb->command |= cpu_to_le16(cb_tx_nc);
1785 else
1786 cb->command &= ~cpu_to_le16(cb_tx_nc);
1787
1788
1789 if ((nic->cbs_avail & ~15) == nic->cbs_avail)
1790 cb->command |= cpu_to_le16(cb_i);
1791 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1792 cb->u.tcb.tcb_byte_count = 0;
1793 cb->u.tcb.threshold = nic->tx_threshold;
1794 cb->u.tcb.tbd_count = 1;
1795 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
1796 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1797 skb_tx_timestamp(skb);
1798 return 0;
1799}
1800
/* ndo_start_xmit: queue one frame onto the CB ring.  Stops the queue
 * when the ring fills; returns NETDEV_TX_BUSY only on the hard -ENOMEM
 * case so the stack requeues the skb.
 */
static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half-duplex Tx hang:
		 * issue a NOP command followed by a 1us delay before
		 * issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
				     "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - DMA mapping failed, skb was freed */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
1836
/* Reclaim completed transmit CBs: unmap DMA, free skbs, bump stats and
 * wake the queue if it had been stopped.  Returns non-zero when at
 * least one skb was reclaimed.
 */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		/* don't read other cb fields until cb_complete is seen */
		dma_rmb();
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			     cb->status);

		/* only data-carrying CBs (not e.g. config) hold an skb */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}
1879
/* Tear down the CB ring: drain any still-mapped skbs, return the pool
 * memory, and reset the ring pointers and CU command state.
 */
static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		/* walk until every CB has been returned to "available" */
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	/* next ring allocation starts the CU rather than resuming it */
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
1903
/* Allocate the circular CB (command block) ring from the DMA pool and
 * link it both in the CPU's view (next/prev pointers) and the
 * controller's view (little-endian bus-address link fields).
 */
static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
				  &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;
	/* pool memory is not zeroed; the HW reads status/command fields */
	memset(nic->cbs, 0, count * sizeof(struct cb));

	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		/* controller-visible link to the next CB (wraps at end) */
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}
1933
1934static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1935{
1936 if (!nic->rxs) return;
1937 if (RU_SUSPENDED != nic->ru_running) return;
1938
1939
1940 if (!rx) rx = nic->rxs;
1941
1942
1943 if (rx->skb) {
1944 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1945 nic->ru_running = RU_RUNNING;
1946 }
1947}
1948
1949#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
/* Post one receive buffer: allocate an skb, seed it with a blank RFD
 * header, map it for DMA, and splice it onto the hardware RFA by
 * patching the previous RFD's link pointer.
 */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link this RFD to the end of the RFA by pointing the previous
	 * RFD at it.  Touching the previous RFD is safe because the
	 * hardware is stopped earlier by the before-last buffer's el bit. */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		/* RFD link may be unaligned within the skb data */
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
1979
/* Process one received RFD: when complete, detach the skb and hand it
 * to the stack.  Returns -EAGAIN when the NAPI budget is exhausted,
 * -ENODATA when the RFD is not yet complete, 0 otherwise.
 */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;
	u16 fcs_pad = 0;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);
	/* don't read past the status word until completion is seen */
	dma_rmb();

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If this RFD carries the el bit but we believe the RU is
		 * still running, check whether it really stopped while
		 * interrupts were off — allows a fast restart without
		 * re-enabling interrupts. */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		/* hand the RFD header back to the device */
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size; keep the FCS only when RXFCS is set */
	if (unlikely(dev->features & NETIF_F_RXFCS))
		fcs_pad = 4;
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* Same el-bit stall check as above, for a completed RFD: the RU
	 * can see the size change but also the el bit set, stopping here. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
		nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	/* In RXALL mode deliver everything, even errored/oversized frames
	 * (still counting the oversize ones). */
	if (unlikely(dev->features & NETIF_F_RXALL)) {
		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)

			nic->rx_over_length_errors++;
		goto process_skb;
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
process_skb:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	/* slot is now empty; e100_rx_clean() will repost it */
	rx->skb = NULL;

	return 0;
}
2078
/* Indicate completed RFDs (bounded by the NAPI budget), replenish the
 * ring, advance the el-bit "stop marker" that keeps the hardware off
 * in-use buffers, and restart the RU if it stalled on no-resources.
 */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* hit quota (-EAGAIN) or no more completed RFDs (-ENODATA) */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On -EAGAIN we hit quota with more work pending, so defer the
	 * restart until cleanup completes.  Only restart when the RU is
	 * actually suspended; starting from a partially-cleaned list
	 * would race the hardware against rx_to_clean in NAPI mode. */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		/* on failure, better luck next pass (watchdog nudges us) */
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break;
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit (and size 0) on the new before-last buffer
		 * so hardware stops there with an RNR interrupt instead of
		 * completing or writing it; this lets us safely update link
		 * pointers behind that stopping point. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* With the new stopping point in place, clear the old one.
		 * The command and size updates are synced separately to get
		 * the proper ordering on the hardware side. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		/* ack the RNR condition before restarting the RU */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
2155
2156static void e100_rx_clean_list(struct nic *nic)
2157{
2158 struct rx *rx;
2159 unsigned int i, count = nic->params.rfds.count;
2160
2161 nic->ru_running = RU_UNINITIALIZED;
2162
2163 if (nic->rxs) {
2164 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2165 if (rx->skb) {
2166 pci_unmap_single(nic->pdev, rx->dma_addr,
2167 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2168 dev_kfree_skb(rx->skb);
2169 }
2170 }
2171 kfree(nic->rxs);
2172 nic->rxs = NULL;
2173 }
2174
2175 nic->rx_to_use = nic->rx_to_clean = NULL;
2176}
2177
/* Build the circular RFD receive ring and post a buffer in every slot.
 * The before-last entry gets the el bit and size 0 so the hardware
 * stops there (RNR) instead of wrapping onto in-use buffers.
 */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			/* partial ring is torn down before bailing out */
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Mark the before-last buffer with the el bit and size 0: the
	 * hardware will raise RNR and go to the No Resources state when
	 * it reaches it, without completing or writing that buffer.  This
	 * lets us update the last buffer's link pointer safely later. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	/* suspended until e100_start_receiver() kicks the RU */
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
2217
/* Interrupt handler: acknowledge all causes, note an RNR (receiver out
 * of resources) stall, and defer the real work to NAPI with the chip's
 * interrupt disabled.
 */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* not our interrupt */
	   stat_ack == stat_ack_not_present)	/* hardware is gone */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* RNR: the RU stalled; e100_rx_clean() will restart it */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if (likely(napi_schedule_prep(&nic->napi))) {
		/* keep the chip quiet until the poll re-enables it */
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
2245
/* NAPI poll: clean received frames (bounded by budget) and completed
 * transmits; when under budget, complete NAPI and re-enable the chip's
 * interrupt.
 */
static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget was not fully consumed, exit polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		e100_enable_irq(nic);
	}

	return work_done;
}
2262
2263#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' — used by netpoll clients (e.g. netconsole) to
 * push/pull packets with interrupts disabled; never runs concurrently
 * with the interrupt routine.
 */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
2273#endif
2274
2275static int e100_set_mac_address(struct net_device *netdev, void *p)
2276{
2277 struct nic *nic = netdev_priv(netdev);
2278 struct sockaddr *addr = p;
2279
2280 if (!is_valid_ether_addr(addr->sa_data))
2281 return -EADDRNOTAVAIL;
2282
2283 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2284 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2285
2286 return 0;
2287}
2288
2289static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2290{
2291 if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2292 return -EINVAL;
2293 netdev->mtu = new_mtu;
2294 return 0;
2295}
2296
2297static int e100_asf(struct nic *nic)
2298{
2299
2300 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2301 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2302 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2303 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
2304}
2305
/* Bring the interface up: allocate RX/CB rings, init the hardware,
 * start the receiver and watchdog, hook the shared IRQ and enable
 * NAPI.  Unwinds in reverse order on failure.
 */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	/* fire the watchdog immediately; it re-arms itself */
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints + schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
2337
/* Tear down a running interface.  Order matters: quiesce NAPI and the
 * TX queue, reset the HW (stops DMA) before freeing the IRQ and rings.
 */
static void e100_down(struct nic *nic)
{
	/* waits here for any in-flight poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
2350
/* ndo_tx_timeout: defer the recovery to process context via a work
 * item — the reset path (request_irq etc.) cannot run here.
 */
static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
2359
/* Worker for e100_tx_timeout(): full down/up cycle under the RTNL
 * lock, skipped if the interface was brought down meanwhile.
 */
static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}
2375
/* Internal MAC or PHY loopback test: transmit one 0xFF-filled frame
 * and verify it arrives byte-identical on the receive ring.  Returns 0
 * on success, -EAGAIN on data mismatch, or an allocation error.
 */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Uses the regular driver rings; the caller is expected to have
	 * the interface down. */
	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* give the frame time to loop back */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* received payload sits just past the RFD header */
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	/* take the PHY out of loopback and restore normal operation */
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
2431
2432#define MII_LED_CONTROL 0x1B
2433#define E100_82552_LED_OVERRIDE 0x19
2434#define E100_82552_LED_ON 0x000F
2435#define E100_82552_LED_OFF 0x000A
2436
2437static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2438{
2439 struct nic *nic = netdev_priv(netdev);
2440 return mii_ethtool_gset(&nic->mii, cmd);
2441}
2442
2443static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2444{
2445 struct nic *nic = netdev_priv(netdev);
2446 int err;
2447
2448 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2449 err = mii_ethtool_sset(&nic->mii, cmd);
2450 e100_exec_cb(nic, NULL, e100_configure);
2451
2452 return err;
2453}
2454
2455static void e100_get_drvinfo(struct net_device *netdev,
2456 struct ethtool_drvinfo *info)
2457{
2458 struct nic *nic = netdev_priv(netdev);
2459 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2460 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2461 strlcpy(info->bus_info, pci_name(nic->pdev),
2462 sizeof(info->bus_info));
2463}
2464
2465#define E100_PHY_REGS 0x1C
2466static int e100_get_regs_len(struct net_device *netdev)
2467{
2468 struct nic *nic = netdev_priv(netdev);
2469 return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
2470}
2471
2472static void e100_get_regs(struct net_device *netdev,
2473 struct ethtool_regs *regs, void *p)
2474{
2475 struct nic *nic = netdev_priv(netdev);
2476 u32 *buff = p;
2477 int i;
2478
2479 regs->version = (1 << 24) | nic->pdev->revision;
2480 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2481 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2482 ioread16(&nic->csr->scb.status);
2483 for (i = E100_PHY_REGS; i >= 0; i--)
2484 buff[1 + E100_PHY_REGS - i] =
2485 mdio_read(netdev, nic->mii.phy_id, i);
2486 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2487 e100_exec_cb(nic, NULL, e100_dump);
2488 msleep(10);
2489 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2490 sizeof(nic->mem->dump_buf));
2491}
2492
2493static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2494{
2495 struct nic *nic = netdev_priv(netdev);
2496 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2497 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2498}
2499
2500static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2501{
2502 struct nic *nic = netdev_priv(netdev);
2503
2504 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2505 !device_can_wakeup(&nic->pdev->dev))
2506 return -EOPNOTSUPP;
2507
2508 if (wol->wolopts)
2509 nic->flags |= wol_magic;
2510 else
2511 nic->flags &= ~wol_magic;
2512
2513 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2514
2515 e100_exec_cb(nic, NULL, e100_configure);
2516
2517 return 0;
2518}
2519
2520static u32 e100_get_msglevel(struct net_device *netdev)
2521{
2522 struct nic *nic = netdev_priv(netdev);
2523 return nic->msg_enable;
2524}
2525
2526static void e100_set_msglevel(struct net_device *netdev, u32 value)
2527{
2528 struct nic *nic = netdev_priv(netdev);
2529 nic->msg_enable = value;
2530}
2531
2532static int e100_nway_reset(struct net_device *netdev)
2533{
2534 struct nic *nic = netdev_priv(netdev);
2535 return mii_nway_restart(&nic->mii);
2536}
2537
2538static u32 e100_get_link(struct net_device *netdev)
2539{
2540 struct nic *nic = netdev_priv(netdev);
2541 return mii_link_ok(&nic->mii);
2542}
2543
2544static int e100_get_eeprom_len(struct net_device *netdev)
2545{
2546 struct nic *nic = netdev_priv(netdev);
2547 return nic->eeprom_wc << 1;
2548}
2549
2550#define E100_EEPROM_MAGIC 0x1234
/* ethtool get_eeprom: copy out of the EEPROM image cached in
 * nic->eeprom.  NOTE(review): offset/len are assumed to have been
 * range-checked by the ethtool core against e100_get_eeprom_len() —
 * there is no local bounds check; confirm before relying on it.
 */
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}
2561
/* ethtool set_eeprom: patch the cached image, then flush the affected
 * words back to the physical EEPROM.  The magic guards against writes
 * meant for a different device.
 */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	/* byte offset/len converted to word units; the extra word
	 * presumably covers a trailing partial word — verify */
	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
2575
2576static void e100_get_ringparam(struct net_device *netdev,
2577 struct ethtool_ringparam *ring)
2578{
2579 struct nic *nic = netdev_priv(netdev);
2580 struct param_range *rfds = &nic->params.rfds;
2581 struct param_range *cbs = &nic->params.cbs;
2582
2583 ring->rx_max_pending = rfds->max;
2584 ring->tx_max_pending = cbs->max;
2585 ring->rx_pending = rfds->count;
2586 ring->tx_pending = cbs->count;
2587}
2588
/* ethtool set_ringparam: clamp the requested RFD/CB counts to driver
 * min/max and restart the interface if it was running.
 * NOTE(review): the e100_up() return value is ignored; a failed
 * restart leaves the device down while 0 is returned — confirm
 * whether that is acceptable before changing.
 */
static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	/* mini and jumbo rings are not supported */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
2612
/* Test names reported for ethtool self-test; order must match the
 * data[] slots filled in e100_diag_test(). */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
2620#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
2621
/* ethtool self-test: link and EEPROM checks always run; offline mode
 * additionally runs the self-test and MAC/PHY loopbacks with the
 * interface taken down, restoring link settings afterwards.  data[]
 * slots correspond to e100_gstrings_test[]; non-zero means failed.
 */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* NOTE(review): purpose of this 4s sleep is not evident from
	 * here — presumably settle time after the tests; confirm. */
	msleep_interruptible(4 * 1000);
}
2654
2655static int e100_set_phys_id(struct net_device *netdev,
2656 enum ethtool_phys_id_state state)
2657{
2658 struct nic *nic = netdev_priv(netdev);
2659 enum led_state {
2660 led_on = 0x01,
2661 led_off = 0x04,
2662 led_on_559 = 0x05,
2663 led_on_557 = 0x07,
2664 };
2665 u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
2666 MII_LED_CONTROL;
2667 u16 leds = 0;
2668
2669 switch (state) {
2670 case ETHTOOL_ID_ACTIVE:
2671 return 2;
2672
2673 case ETHTOOL_ID_ON:
2674 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
2675 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2676 break;
2677
2678 case ETHTOOL_ID_OFF:
2679 leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
2680 break;
2681
2682 case ETHTOOL_ID_INACTIVE:
2683 break;
2684 }
2685
2686 mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
2687 return 0;
2688}
2689
/* Statistics names reported via ETH_SS_STATS.  The first
 * E100_NET_STATS_LEN names map 1:1, in declaration order, onto the
 * fields of struct net_device_stats (see the flat-array cast in
 * e100_get_ethtool_stats()); the remainder are driver-private counters
 * appended in the same order they are emitted there. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
/* number of leading names taken from struct net_device_stats */
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
2705
2706static int e100_get_sset_count(struct net_device *netdev, int sset)
2707{
2708 switch (sset) {
2709 case ETH_SS_TEST:
2710 return E100_TEST_LEN;
2711 case ETH_SS_STATS:
2712 return E100_STATS_LEN;
2713 default:
2714 return -EOPNOTSUPP;
2715 }
2716}
2717
/* ethtool .get_ethtool_stats: fill data[] in the exact order of
 * e100_gstrings_stats. */
static void e100_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	/* Copy the standard counters by treating net_device_stats as a
	 * flat array of unsigned long.
	 * NOTE(review): this assumes the struct's first E100_NET_STATS_LEN
	 * fields are all unsigned long and in gstrings order — confirm
	 * against the current struct net_device_stats definition. */
	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	/* driver-private counters, same order as the tail of
	 * e100_gstrings_stats */
	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}
2738
2739static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2740{
2741 switch (stringset) {
2742 case ETH_SS_TEST:
2743 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2744 break;
2745 case ETH_SS_STATS:
2746 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2747 break;
2748 }
2749}
2750
/* ethtool entry points for the e100 driver */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.set_phys_id		= e100_set_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
	.get_ts_info		= ethtool_op_get_ts_info,
};
2775
2776static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2777{
2778 struct nic *nic = netdev_priv(netdev);
2779
2780 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2781}
2782
2783static int e100_alloc(struct nic *nic)
2784{
2785 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2786 &nic->dma_addr);
2787 return nic->mem ? 0 : -ENOMEM;
2788}
2789
2790static void e100_free(struct nic *nic)
2791{
2792 if (nic->mem) {
2793 pci_free_consistent(nic->pdev, sizeof(struct mem),
2794 nic->mem, nic->dma_addr);
2795 nic->mem = NULL;
2796 }
2797}
2798
2799static int e100_open(struct net_device *netdev)
2800{
2801 struct nic *nic = netdev_priv(netdev);
2802 int err = 0;
2803
2804 netif_carrier_off(netdev);
2805 if ((err = e100_up(nic)))
2806 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2807 return err;
2808}
2809
/* .ndo_stop: tear the interface down. */
static int e100_close(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_down(nic);
	return 0;
}
2815
2816static int e100_set_features(struct net_device *netdev,
2817 netdev_features_t features)
2818{
2819 struct nic *nic = netdev_priv(netdev);
2820 netdev_features_t changed = features ^ netdev->features;
2821
2822 if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
2823 return 0;
2824
2825 netdev->features = features;
2826 e100_exec_cb(nic, NULL, e100_configure);
2827 return 0;
2828}
2829
/* net_device entry points */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
	.ndo_set_features	= e100_set_features,
};
2845
/* PCI probe: allocate the netdev, enable and map the device, load the
 * EEPROM/MAC address and register the interface.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound through the goto ladder at the bottom.
 */
static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;

	/* hardware can keep the FCS and pass up otherwise-bad frames */
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->netdev_ops = &e100_netdev_ops;
	netdev->ethtool_ops = &e100_ethtool_ops;
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	/* temporary name (PCI slot) so early log output is identifiable;
	 * replaced with "eth%d" just before register_netdev() below */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	/* BAR 0 must be a memory resource */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	/* device can only DMA to 32-bit addresses */
	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	/* BAR 1 is the I/O mapping, BAR 0 the MMIO mapping of the CSRs */
	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	/* driver_data carries the ICH flag from the PCI id table */
	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* D100 MAC doesn't allow rx of vlan packets with normal MTU */
	if (nic->mac < mac_82558_D101_A4)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	/* MAC address lives in the first words of the EEPROM */
	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	    (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
		goto err_out_free;
	}
	/* NOTE(review): pool is created after register_netdev(); a very
	 * early open could observe a NULL cbs_pool — confirm whether the
	 * rtnl serialization here is sufficient. */
	nic->cbs_pool = pci_pool_create(netdev->name,
			   nic->pdev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
	if (!nic->cbs_pool) {
		netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
		err = -ENOMEM;
		goto err_out_pool;
	}
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	return 0;

err_out_pool:
	unregister_netdev(netdev);
err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	free_netdev(netdev);
	return err;
}
2999
3000static void e100_remove(struct pci_dev *pdev)
3001{
3002 struct net_device *netdev = pci_get_drvdata(pdev);
3003
3004 if (netdev) {
3005 struct nic *nic = netdev_priv(netdev);
3006 unregister_netdev(netdev);
3007 e100_free(nic);
3008 pci_iounmap(pdev, nic->csr);
3009 pci_pool_destroy(nic->cbs_pool);
3010 free_netdev(netdev);
3011 pci_release_regions(pdev);
3012 pci_disable_device(pdev);
3013 }
3014}
3015
#define E100_82552_SMARTSPEED   0x14   /* 82552 SmartSpeed control register */
#define E100_82552_REV_ANEG     0x0200 /* reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* auto-negotiate now */
/* Common suspend/shutdown path: stop the device, save PCI state and
 * decide whether wake-up should stay armed.  On return *enable_wake
 * tells the caller whether WoL magic packet or ASF requires PME. */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* bitwise | is intentional: both operands are evaluated, and
	 * e100_asf() has no side effects that must be skipped */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation so the 82552 can drop
		 * link speed while waking, saving power */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_clear_master(pdev);
}
3047
3048static int __e100_power_off(struct pci_dev *pdev, bool wake)
3049{
3050 if (wake)
3051 return pci_prepare_to_sleep(pdev);
3052
3053 pci_wake_from_d3(pdev, false);
3054 pci_set_power_state(pdev, PCI_D3hot);
3055
3056 return 0;
3057}
3058
3059#ifdef CONFIG_PM
3060static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
3061{
3062 bool wake;
3063 __e100_shutdown(pdev, &wake);
3064 return __e100_power_off(pdev, wake);
3065}
3066
/* Legacy PCI .resume: restore PCI state, undo the SmartSpeed wake
 * tweaks made in __e100_shutdown() and bring the interface back up if
 * it was running at suspend time. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* disable reverse auto-negotiation armed by __e100_shutdown() */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
3093#endif
3094
3095static void e100_shutdown(struct pci_dev *pdev)
3096{
3097 bool wake;
3098 __e100_shutdown(pdev, &wake);
3099 if (system_state == SYSTEM_POWER_OFF)
3100 __e100_power_off(pdev, wake);
3101}
3102
3103
3104
3105
3106
3107
3108
/* AER .error_detected: a PCI bus error was reported for this device.
 * Detach and quiesce it; ask the core for a slot reset unless the
 * failure is permanent. */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* device is gone for good — no point requesting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
3126
3127
3128
3129
3130
3131
3132
/* AER .slot_reset: the slot has been reset; re-enable the device and
 * reinitialize the hardware so e100_io_resume() can restart traffic. */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
3152
3153
3154
3155
3156
3157
3158
3159
/* AER .resume: error recovery is complete; reattach the netdev and
 * restart the interface and its watchdog if it was running. */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		/* kick the watchdog so link state is re-evaluated now */
		mod_timer(&nic->watchdog, jiffies);
	}
}
3174
/* PCI AER (Advanced Error Reporting) recovery callbacks */
static const struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
3180
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       e100_remove,
#ifdef CONFIG_PM
	/* legacy PCI power-management callbacks */
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
3194
3195static int __init e100_init_module(void)
3196{
3197 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3198 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3199 pr_info("%s\n", DRV_COPYRIGHT);
3200 }
3201 return pci_register_driver(&e100_driver);
3202}
3203
3204static void __exit e100_cleanup_module(void)
3205{
3206 pci_unregister_driver(&e100_driver);
3207}
3208
3209module_init(e100_init_module);
3210module_exit(e100_cleanup_module);
3211