1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
/* Prefix every pr_*() log message with the module name. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
151
152#include <linux/hardirq.h>
153#include <linux/interrupt.h>
154#include <linux/module.h>
155#include <linux/moduleparam.h>
156#include <linux/kernel.h>
157#include <linux/types.h>
158#include <linux/sched.h>
159#include <linux/slab.h>
160#include <linux/delay.h>
161#include <linux/init.h>
162#include <linux/pci.h>
163#include <linux/dma-mapping.h>
164#include <linux/dmapool.h>
165#include <linux/netdevice.h>
166#include <linux/etherdevice.h>
167#include <linux/mii.h>
168#include <linux/if_vlan.h>
169#include <linux/skbuff.h>
170#include <linux/ethtool.h>
171#include <linux/string.h>
172#include <linux/firmware.h>
173#include <linux/rtnetlink.h>
174#include <asm/unaligned.h>
175
176
/* Driver identification strings reported via MODULE_* and ethtool. */
#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
#define DRV_VERSION "3.5.24-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"

/* Link/stats watchdog interval and NAPI poll budget. */
#define E100_WATCHDOG_PERIOD (2 * HZ)
#define E100_NAPI_WEIGHT 16

/* CPUSaver microcode images, selected by MAC revision. */
#define FIRMWARE_D101M "e100/d101m_ucode.bin"
#define FIRMWARE_D101S "e100/d101s_ucode.bin"
#define FIRMWARE_D102E "e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

/* Module parameters (all read-only after load: perm 0). */
static int debug = 3;			/* default message level */
static int eeprom_bad_csum_allow = 0;	/* tolerate EEPROM checksum errors */
static int use_io = 0;			/* force port I/O instead of MMIO */
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
207
/* Build one pci_device_id entry for an Intel 8255x part; the extra
 * driver_data field (ich) flags ICH-bridge variants needing workarounds. */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
/* All PCI device IDs this driver binds to. */
static const struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);
257
/* Controller (MAC) revision identifiers.  e100_get_defaults() takes the
 * value straight from pdev->revision (or forces D101M on ICH parts), so
 * these match the PCI revision IDs of the respective silicon steppings. */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};
273
/* PHY identifiers: (MII_PHYSID2 << 16) | MII_PHYSID1, as composed in
 * e100_phy_init() from the two MII ID registers. */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};
286
287
/* Memory-mapped Control/Status Register layout; accessed exclusively
 * through ioread*/iowrite* on nic->csr.  The scb sub-struct is the
 * System Control Block used for command/status handshaking. */
struct csr {
	struct {
		u8 status;	/* RU/CU status */
		u8 stat_ack;	/* write-1-to-ack interrupt causes */
		u8 cmd_lo;	/* CU/RU commands (enum scb_cmd_lo) */
		u8 cmd_hi;	/* interrupt masking (enum scb_cmd_hi) */
		u32 gen_ptr;	/* general pointer (DMA address operand) */
	} scb;
	u32 port;		/* reset/self-test port (enum port) */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;	/* bit-banged EEPROM interface (enum eeprom_ctrl_lo) */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MII management interface (enum mdi_ctrl) */
	u32 rx_dma_count;
};
303
/* Receive-unit status bits in scb.status. */
enum scb_status {
	rus_no_res = 0x08,	/* RU out of resources */
	rus_ready  = 0x10,	/* RU ready */
	rus_mask   = 0x3C,	/* mask covering the RUS field */
};
309
/* Software-tracked receive-unit state (nic->ru_running). */
enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};
315
/* Interrupt cause bits in scb.stat_ack; writing a bit back acks it. */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,	/* software-generated interrupt */
	stat_ack_rnr         = 0x10,	/* RU not ready */
	stat_ack_cu_idle     = 0x20,	/* CU went idle */
	stat_ack_frame_rx    = 0x40,	/* frame received */
	stat_ack_cu_cmd_done = 0x80,	/* CU command completed */
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};
327
/* Interrupt mask control written to scb.cmd_hi. */
enum scb_cmd_hi {
	irq_mask_none = 0x00,	/* enable interrupts */
	irq_mask_all  = 0x01,	/* disable interrupts */
	irq_sw_gen    = 0x02,	/* trigger a software interrupt */
};
333
/* Command-unit / receive-unit opcodes written to scb.cmd_lo. */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};
345
/* Completion signatures the device writes after dump / dump+reset. */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};
350
/* Opcodes written to csr->port (see e100_hw_reset / e100_self_test). */
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};
356
/* Bit-bang lines of the serial EEPROM interface (eeprom_ctrl_lo). */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* serial clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (to EEPROM) */
	eedo = 0x08,	/* data out (from EEPROM) */
};
363
/* Control bits of the MDI (MII management) register, csr->mdi_ctrl. */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,	/* command completed / interface idle */
};
369
/* Serial EEPROM opcodes clocked out by e100_eeprom_read/write. */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,	/* write disable */
	op_ewen  = 0x13,	/* write enable */
};
376
/* Word offsets into the cached EEPROM image (nic->eeprom[]). */
enum eeprom_offsets {
	eeprom_cnfg_mdix = 0x03,
	eeprom_phy_iface = 0x06,
	eeprom_id        = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};
384
/* Flag within the eeprom_cnfg_mdix word. */
enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};
388
/* PHY interface type stored in the high byte of the eeprom_phy_iface
 * word (decoded in e100_phy_check_without_mii). */
enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};
400
/* Flag within the eeprom_id word. */
enum eeprom_id {
	eeprom_id_wol = 0x0020,	/* Wake-on-LAN capable */
};
404
/* Flags within the eeprom_config_asf word. */
enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};
409
/* Status bits the device writes back into cb->status on completion. */
enum cb_status {
	cb_complete = 0x8000,	/* command finished */
	cb_ok       = 0x2000,	/* command finished without error */
};
414
415
416
417
418
/* Command-block opcodes and control bits placed in cb->command. */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,	/* set individual (MAC) address */
	cb_config = 0x0002,
	cb_multi  = 0x0003,	/* load multicast list */
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,	/* load microcode */
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,	/* TX flexible mode */
	cb_tx_nc  = 0x0010,
	cb_cid    = 0x1f00,	/* CNA interrupt delay field */
	cb_i      = 0x2000,	/* interrupt on completion */
	cb_s      = 0x4000,	/* suspend after this CB */
	cb_el     = 0x8000,	/* end of list */
};
434
/* Receive Frame Descriptor as laid out in device-shared memory;
 * all multi-byte fields are little-endian for the hardware. */
struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;		/* DMA address of next RFD */
	__le32 rbd;
	__le16 actual_size;	/* bytes actually received */
	__le16 size;		/* buffer capacity */
};
443
/* Host-side bookkeeping for one receive buffer: its place in the RX
 * ring, the backing skb, and the skb's mapped DMA address. */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
449
/* X(a,b) lays out two bitfields in the byte order the hardware expects
 * regardless of the host's bitfield endianness. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
/* Device configure-command block (cb_config payload); each u8 maps to
 * one configuration byte of the 8255x configure command.  Field values
 * are programmed in e100_configure(). */
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};
493
/* Multicast-setup command payload: count of addresses followed by the
 * packed 6-byte addresses themselves. */
#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};
499
/* Size of a CPUSaver microcode image in 32-bit words. */
#define UCODE_SIZE			134
/* Control Block: the hardware-visible command descriptor (status,
 * command, link and the per-opcode payload union) followed by host-only
 * bookkeeping (ring links, own DMA address, associated skb). */
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;		/* DMA address of next CB */
	union {
		u8 iaaddr[ETH_ALEN];		/* cb_iaaddr */
		__le32 ucode[UCODE_SIZE];	/* cb_ucode */
		struct config config;		/* cb_config */
		struct multi multi;		/* cb_multi */
		struct {			/* cb_tx */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;	/* cb_dump */
	} u;
	/* Host-only fields below: not read by the device. */
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};
528
/* Loopback mode programmed into config->loopback (2-bit field). */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};
532
/* Statistics dump area the device DMAs into on cuc_dump_stats; the
 * final 'complete' word carries the cuc_dump completion signature. */
struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};
544
/* Coherent DMA scratch area shared with the device: self-test result
 * block, statistics dump, and a register-dump buffer. */
struct mem {
	struct {
		u32 signature;	/* written non-zero by a completed self-test */
		u32 result;	/* 0 on self-test pass */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};
553
/* Bounds and current value for a tunable ring size. */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};
559
/* Ring-size parameters: receive frame descriptors and control blocks. */
struct params {
	struct param_range rfds;
	struct param_range cbs;
};
564
/* Per-adapter private state (netdev_priv of the net_device).  Grouped
 * by cacheline to keep hot RX/TX data together. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	/* MDIO access backend: hardware, 82552 fixup, or MII emulation */
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	/* RX ring state */
	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;			/* template for fresh RFDs */
	enum ru_state ru_running;

	/* TX / command ring state */
	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;			/* serializes SCB accesses */
	struct csr __iomem *csr;		/* mapped device registers */
	enum scb_cmd_lo cuc_cmd;		/* next CU command (start/resume) */
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;			/* precomputed cb->command for TX */
	/* End: frequently used values */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;			/* shared DMA scratch area */
	dma_addr_t dma_addr;			/* DMA address of *mem */

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	/* Software-maintained TX counters */
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	/* Software-maintained RX counters */
	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;				/* EEPROM size in words */
	__le16 eeprom[256];			/* cached EEPROM image */
	spinlock_t mdio_lock;
	const struct firmware *fw;		/* cached CPUSaver microcode */
};
633
/* Flush previous posted PCI writes by forcing a read from the device;
 * the value itself is discarded. */
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}
640
/* Unmask device interrupts; cmd_lock serializes SCB register access. */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);	/* push the write out before unlocking */
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
650
/* Mask all device interrupts; cmd_lock serializes SCB register access. */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);	/* push the write out before unlocking */
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
660
/* Reset the controller: selective reset first to quiesce CU/RU, then a
 * full software reset, re-masking interrupts afterwards (the reset
 * leaves them enabled). */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
675
/* Run the device's built-in self-test via the port register.
 * The device DMAs its verdict into mem->selftest; a zero result with a
 * non-zero signature means pass.  Returns 0 on success, -ETIMEDOUT on
 * failure or timeout. */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
708
709static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
710{
711 u32 cmd_addr_data[3];
712 u8 ctrl;
713 int i, j;
714
715
716 cmd_addr_data[0] = op_ewen << (addr_len - 2);
717 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
718 le16_to_cpu(data);
719 cmd_addr_data[2] = op_ewds << (addr_len - 2);
720
721
722 for (j = 0; j < 3; j++) {
723
724
725 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
726 e100_write_flush(nic); udelay(4);
727
728 for (i = 31; i >= 0; i--) {
729 ctrl = (cmd_addr_data[j] & (1 << i)) ?
730 eecs | eedi : eecs;
731 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
732 e100_write_flush(nic); udelay(4);
733
734 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
735 e100_write_flush(nic); udelay(4);
736 }
737
738 msleep(10);
739
740
741 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
742 e100_write_flush(nic); udelay(4);
743 }
744};
745
746
747static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
748{
749 u32 cmd_addr_data;
750 u16 data = 0;
751 u8 ctrl;
752 int i;
753
754 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
755
756
757 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
758 e100_write_flush(nic); udelay(4);
759
760
761 for (i = 31; i >= 0; i--) {
762 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
763 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
764 e100_write_flush(nic); udelay(4);
765
766 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
767 e100_write_flush(nic); udelay(4);
768
769
770
771 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
772 if (!(ctrl & eedo) && i > 16) {
773 *addr_len -= (i - 16);
774 i = 17;
775 }
776
777 data = (data << 1) | (ctrl & eedo ? 1 : 0);
778 }
779
780
781 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
782 e100_write_flush(nic); udelay(4);
783
784 return cpu_to_le16(data);
785};
786
787
/* Load the entire EEPROM image into nic->eeprom[] and validate its
 * checksum: the sum of all words must equal 0xBABA.  Returns 0 on
 * success, -EAGAIN on a bad checksum (unless eeprom_bad_csum_allow). */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
812
813
/* Write @count words starting at @start from nic->eeprom[] back to the
 * EEPROM, then recompute and store the checksum word so the image sums
 * to 0xBABA.  Returns 0 on success, -EINVAL if the range would touch
 * the checksum word or fall outside the EEPROM. */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
838
#define E100_WAIT_SCB_TIMEOUT	20000	/* spin iterations before giving up */
#define E100_WAIT_SCB_FAST	20	/* spin without delay for this many */
/* Issue a single SCB command: wait (bounded) for the previous command
 * to be accepted (cmd_lo reads 0), optionally load the general pointer,
 * then write the opcode.  cuc_resume does not use the pointer, so it is
 * skipped for that command.  Returns 0 or -EAGAIN on timeout. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* Back off with a delay once the fast-spin budget is spent */
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
871
/* Take the next free control block, let @cb_prepare fill in its payload
 * and command, link it into the suspend/resume chain, and kick the CU
 * for every prepared-but-unsent CB.  Returns 0, -ENOMEM when no CB is
 * free, -ENOSPC when this consumed the last free CB, or the error from
 * @cb_prepare. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	err = cb_prepare(nic, cb, skb);
	if (err)
		goto err_unlock;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in new CB first, then clear S-bit in previous CB
	 * so the CU keeps running into the new one. */
	cb->command |= cpu_to_le16(cb_s);
	dma_wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Couldn't schedule the command because the
			 * controller is too busy; leave the CB queued
			 * and try again when the next command is
			 * scheduled. */
			if (err == -ENOSPC) {
				/* Ring is full and the CU is stuck:
				 * request a reset via the tx-timeout path */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
929
930static int mdio_read(struct net_device *netdev, int addr, int reg)
931{
932 struct nic *nic = netdev_priv(netdev);
933 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
934}
935
936static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
937{
938 struct nic *nic = netdev_priv(netdev);
939
940 nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
941}
942
943
/* Perform one MDI transaction on real hardware: wait for the interface
 * to go ready, program the (addr, reg, direction, data) command word,
 * then poll until mdi_ready signals completion.  mdio_lock serializes
 * access to the mdi_ctrl register.  Returns the 16-bit data read (or
 * the completion word's low bits on writes); 0 if the interface never
 * went ready. */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/* Wait until the interface is ready before starting a new
	 * transaction; bail out (returning 0) if it never becomes
	 * ready rather than wedging the caller. */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	/* Poll for completion of this transaction */
	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
982
983
/* MDI backend for the 82552 PHY.  On a BMCR write that (re)starts
 * autonegotiation, fold the advertised 100 Mb speed/duplex into the
 * BMCR bits before passing the write down to the hardware backend. */
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
				 u32 addr,
				 u32 dir,
				 u32 reg,
				 u16 data)
{
	if ((reg == MII_BMCR) && (dir == mdi_write)) {
		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
							MII_ADVERTISE);

			/* Workaround: mirror what we advertise into the
			 * forced speed/duplex bits so the 82552 links at
			 * the intended rate. */
			if (advert & ADVERTISE_100FULL)
				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				data |= BMCR_SPEED100;
		}
	}
	return mdio_ctrl_hw(nic, addr, dir, reg, data);
}
1007
1008
1009
1010
1011
1012
1013
/* MDI backend for MII-less PHYs (i82503, S80C24, ...): emulate just
 * enough of the MII register set for the generic mii code to work —
 * link always up, autoneg complete, 10 Mb full duplex.  Reads of
 * unimplemented registers return 0xFFFF; all writes are discarded. */
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
				      u32 addr,
				      u32 dir,
				      u32 reg,
				      u16 data)
{
	/* Only the registers below are emulated; anything else is
	 * logged at debug level and answered with 0xFFFF. */
	if (dir == mdi_read) {
		switch (reg) {
		case MII_BMCR:
			/* Auto-negotiation, full duplex */
			return BMCR_ANENABLE |
				BMCR_FULLDPLX;
		case MII_BMSR:
			/* Link up, autoneg-capable, 10 Mb full duplex */
			return BMSR_LSTATUS |
				BMSR_ANEGCAPABLE |
				BMSR_10FULL;
		case MII_ADVERTISE:
			/* 10 Mb half/full duplex only */
			return ADVERTISE_10HALF |
				ADVERTISE_10FULL;
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	} else {
		switch (reg) {
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	}
}
1056static inline int e100_phy_supports_mii(struct nic *nic)
1057{
1058
1059
1060
1061 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
1062}
1063
/* Initialize per-adapter defaults: MAC revision, ring sizes, TX
 * threshold/command template, the blank RFD template, and the
 * mii_if_info hookup. */
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is the PCI revision ID; ICH parts are treated as D101M */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Default TX FIFO threshold before starting transmission */
	nic->tx_threshold = 0xE0;

	/* Template for the per-frame TX command: no interrupt-on-completion
	 * for controllers that support the CNA-interrupt-delay field */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for new RFDs: no RBD, max buffer size */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
1096
/* cb_prepare callback that fills a control block with the device
 * configure command (cb_config).  Starts from a zeroed struct config,
 * applies base defaults, then adjusts for PHY type, promiscuous /
 * multicast / WoL flags, RXFCS/RXALL features, and MAC revision.
 * Always returns 0. */
static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	/* Base configuration (values beyond the defaults the hardware
	 * assumes for a zeroed block) */
	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* standard tcb */
	config->standard_stat_counter = 0x1;
	config->rx_discard_short_frames = 0x1;	/* discard short frames */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* use eeprom src addr */
	config->preamble_length = 0x2;		/* # of preamble bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7 = no priority fc */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;	/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1;	/* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
		     c + 0);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
		     c + 8);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
		     c + 16);
	return 0;
}
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
/* CPUSaver microcode tuning parameters patched into the image. */
#define BUNDLESMALL 1		/* also bundle small (non-TCP) frames */
#define BUNDLEMAX (u16)6	/* max frames bundled per interrupt */
#define INTDELAY (u16)1536	/* interrupt delay in device time units */

/* Select, fetch and sanity-check the CPUSaver microcode image for this
 * MAC revision.  Returns NULL when no microcode applies (ICH parts and
 * unsupported MACs, or an optional image that failed to load), an
 * ERR_PTR on hard failure, or the validated firmware.  The firmware is
 * cached in nic->fw so resume paths don't refetch it. */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;
	bool required = false;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision.
	 *
	 * Based on comments in the source code for the FreeBSD fxp
	 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
	 * a fix for "jumbo frames + dynamic TBD" (see the required
	 * flag below); the other images are optional CPUSaver only. */
	if (nic->mac == mac_82559_D101M) {
		fw_name = FIRMWARE_D101M;
	} else if (nic->mac == mac_82559_D101S) {
		fw_name = FIRMWARE_D101S;
	} else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
		fw_name = FIRMWARE_D102E;
		required = true;
	} else { /* No ucode on other devices */
		return NULL;
	}

	/* If the firmware has not previously been loaded, request a pointer
	 * to it. If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used. */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		if (required) {
			netif_err(nic, probe, nic->netdev,
				  "Failed to load firmware \"%s\": %d\n",
				  fw_name, err);
			return ERR_PTR(err);
		} else {
			netif_info(nic, probe, nic->netdev,
				   "CPUSaver disabled. Needs \"%s\": %d\n",
				   fw_name, err);
			return NULL;
		}
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	 * indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is usable and will be saved for later use
	 * (during resume from hibernation) */
	nic->fw = fw;
	return fw;
}
1334
/* cb_prepare callback for the microcode-load command.  @skb actually
 * smuggles the struct firmware pointer (see e100_load_ucode_wait); the
 * ucode words are copied into the CB and the three tunable parameters
 * (interrupt delay, bundle max, min bundled frame size) are patched in
 * at the offsets stored at the end of the firmware blob. */
static int e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in the low half-words */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
	return 0;
}
1364
/* Fetch the CPUSaver microcode (if any applies), queue the load command
 * and synchronously wait for the device to complete it.  Returns 0 on
 * success or when no microcode is needed (PTR_ERR(NULL) == 0), a
 * negative errno on fetch failure, or -EPERM when the load command does
 * not complete successfully. */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
1404
1405static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1406 struct sk_buff *skb)
1407{
1408 cb->command = cpu_to_le16(cb_iaaddr);
1409 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1410 return 0;
1411}
1412
1413static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1414{
1415 cb->command = cpu_to_le16(cb_dump);
1416 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1417 offsetof(struct mem, dump_buf));
1418 return 0;
1419}
1420
/* Consult the EEPROM's PHY interface type to decide whether this board
 * carries an MII-less PHY.  If so, install the emulated-MII mdio
 * backend and return 1; otherwise return 0 so the caller can treat the
 * missing PHY as a hard error. */
static int e100_phy_check_without_mii(struct nic *nic)
{
	u8 phy_type;
	int without_mii;

	/* PHY type lives in the high byte of the eeprom_phy_iface word */
	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;

	switch (phy_type) {
	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
	case I82503: /* Non-MII PHY; UNTESTED! */
	case S80C24: /* Non-MII PHY; tested and working */
		/* These PHYs have no MII management interface; fall
		 * back to emulated MII registers so the generic mii
		 * code still works. */
		netif_info(nic, probe, nic->netdev,
			   "found MII-less i82503 or 80c24 or other PHY\n");

		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */

		/* these might be needed for certain MII-less cards...
		 * nic->flags |= ich;
		 * nic->flags |= ich_10h_workaround; */

		without_mii = 1;
		break;
	default:
		without_mii = 0;
		break;
	}
	return without_mii;
}
1456
/* PHY-specific register bits used by e100_phy_init(). */
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1	/* NSC congestion control reg */
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
/* Locate and initialize the PHY.
 *
 * Scans all 32 MDIO addresses for a responding PHY, isolates the
 * unused addresses, then applies per-PHY quirks: NSC congestion
 * control, 82552 flow-control advertising and reset, and MDI/MDI-X
 * auto-switch on D102+ / ICH parts.
 *
 * Returns 0 on success (including the MII-less PHY case) or -EAGAIN
 * if no usable PHY was found.
 */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Probe addresses in order {1, 0, 2, 3, ..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* BMSR read twice — presumably because link bits are
		 * latched and the second read reflects current state */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* all-ones BMCR or all-zero BMCR+BMSR means nothing
		 * answered at this address */
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* No PHY responded on any address — may still be a
		 * MII-less PHY identified via the EEPROM */
		if (e100_phy_check_without_mii(nic))
			return 0;
		else {
			/* Unknown hardware; give up */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Read and record the 32-bit PHY identifier */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Isolate every other address; un-isolate the selected PHY
	 * (except 82552, handled below) */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/* NOTE(review): for the 82552, 'bmcr' here still holds the value
	 * read during the detection loop rather than a fresh read —
	 * looks intentional (write ordering quirk) but worth confirming */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			   bmcr & ~BMCR_ISOLATE);

	/* NSC TX PHY: enable "TX ready" congestion signalling */
#define NCS_PHY_MODEL_MASK 0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* disable congestion control, keep txready */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* Use the dedicated 82552 mdio control path */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Advertise symmetric and asymmetric pause */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset the PHY so the new advertisement takes effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
		(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
1554
/* Bring the controller to a fully initialized state: reset, optional
 * self-test, PHY setup, CU/RU base loads, microcode download, then the
 * configure / IA-address / statistics-setup command blocks.  The steps
 * are order-dependent; the first failure aborts with its errno.
 */
static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	/* NOTE(review): logged at error level though it is only a trace —
	 * presumably should be a debug print; confirm before changing */
	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	/* Self-test issues a reset, so skip it from interrupt context */
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	/* Load base 0 so CB/RFD link fields are absolute bus addresses */
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* Point the hardware at the stats area and zero the counters */
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
1587
1588static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1589{
1590 struct net_device *netdev = nic->netdev;
1591 struct netdev_hw_addr *ha;
1592 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
1593
1594 cb->command = cpu_to_le16(cb_multi);
1595 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1596 i = 0;
1597 netdev_for_each_mc_addr(ha, netdev) {
1598 if (i == count)
1599 break;
1600 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
1601 ETH_ALEN);
1602 }
1603 return 0;
1604}
1605
1606static void e100_set_multicast_list(struct net_device *netdev)
1607{
1608 struct nic *nic = netdev_priv(netdev);
1609
1610 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1611 "mc_count=%d, flags=0x%04X\n",
1612 netdev_mc_count(netdev), netdev->flags);
1613
1614 if (netdev->flags & IFF_PROMISC)
1615 nic->flags |= promiscuous;
1616 else
1617 nic->flags &= ~promiscuous;
1618
1619 if (netdev->flags & IFF_ALLMULTI ||
1620 netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
1621 nic->flags |= multicast_all;
1622 else
1623 nic->flags &= ~multicast_all;
1624
1625 e100_exec_cb(nic, NULL, e100_configure);
1626 e100_exec_cb(nic, NULL, e100_multi);
1627}
1628
/* Fold the hardware's DMA'd statistics dump into the netdev stats and
 * the driver's private counters, then kick off the next dump+reset.
 * The dump area layout grew across MAC generations, so the location of
 * the "dump complete" marker depends on nic->mac.
 */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* Pick the field that follows the last counter this MAC writes:
	 * that is where the completion code lands */
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Only consume the dump if the hardware has finished writing it
	 * (marker set by the previous cuc_dump_reset); otherwise skip and
	 * retry on the next watchdog tick */
	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		nic->rx_short_frame_errors +=
			le32_to_cpu(s->rx_short_frame_errors);
		/* length errors are recomputed, not accumulated */
		ns->rx_length_errors = nic->rx_short_frame_errors +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* Flow-control counters exist from 82558 (D101 A4) on;
		 * TCO counters from 82559 (D101M) on */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	/* Start the next dump; counters are cleared by the hardware */
	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}
1690
1691static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1692{
1693
1694
1695
1696 if (duplex == DUPLEX_HALF) {
1697 u32 prev = nic->adaptive_ifs;
1698 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1699
1700 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1701 (nic->tx_frames > min_frames)) {
1702 if (nic->adaptive_ifs < 60)
1703 nic->adaptive_ifs += 5;
1704 } else if (nic->tx_frames < min_frames) {
1705 if (nic->adaptive_ifs >= 5)
1706 nic->adaptive_ifs -= 5;
1707 }
1708 if (nic->adaptive_ifs != prev)
1709 e100_exec_cb(nic, NULL, e100_configure);
1710 }
1711}
1712
/* Periodic (2s) watchdog timer: report link transitions, trigger a
 * software interrupt so stale CBs/RFDs get cleaned, refresh stats and
 * adaptive IFS, and re-arm quirk flags.  Re-arms itself at the end.
 */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link checking */
	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	/* Log only on an actual transition, in either direction */
	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Generate a software interrupt so any completed-but-unreaped
	 * work gets processed even if no hardware interrupt fires;
	 * cmd_lock serializes SCB command register access */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Best-effort NOP workaround for ICH at 10 half */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
1763
/* Fill a transmit command block for one skb: DMA-map the data,
 * select no-CRC mode when requested, and request a Tx-complete
 * interrupt on every 16th frame.  e100_exec_cb() callback; returns
 * -ENOMEM (and frees the skb) if DMA mapping fails.
 */
static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	dma_addr_t dma_addr;
	cb->command = nic->tx_command;

	dma_addr = pci_map_single(nic->pdev,
				  skb->data, skb->len, PCI_DMA_TODEVICE);
	/* If we can't map the skb, have the upper layer try later */
	if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
		dev_kfree_skb_any(skb);
		skb = NULL;
		return -ENOMEM;
	}

	/* Honor SO_NOFCS: tell the hardware not to append the CRC on
	 * this frame */
	if (unlikely(skb->no_fcs))
		cb->command |= cpu_to_le16(cb_tx_nc);
	else
		cb->command &= ~cpu_to_le16(cb_tx_nc);

	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
	skb_tx_timestamp(skb);
	return 0;
}
1800
1801static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1802 struct net_device *netdev)
1803{
1804 struct nic *nic = netdev_priv(netdev);
1805 int err;
1806
1807 if (nic->flags & ich_10h_workaround) {
1808
1809
1810
1811 if (e100_exec_cmd(nic, cuc_nop, 0))
1812 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1813 "exec cuc_nop failed\n");
1814 udelay(1);
1815 }
1816
1817 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1818
1819 switch (err) {
1820 case -ENOSPC:
1821
1822 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1823 "No space for CB\n");
1824 netif_stop_queue(netdev);
1825 break;
1826 case -ENOMEM:
1827
1828 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1829 "Out of Tx resources, returning skb\n");
1830 netif_stop_queue(netdev);
1831 return NETDEV_TX_BUSY;
1832 }
1833
1834 return NETDEV_TX_OK;
1835}
1836
/* Reap completed transmit CBs: walk the ring from cb_to_clean while
 * the hardware has set cb_complete, unmap and free each skb, and
 * return the ring entries to the available pool.  Wakes the Tx queue
 * if it was stopped and anything was cleaned.  Returns nonzero when
 * at least one frame was reaped.
 */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		/* Order the status read above before reading the rest of
		 * the descriptor the device wrote */
		dma_rmb();
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			     cb->status);

		/* Only data-bearing CBs carry an skb; control CBs
		 * (configure, multicast, ...) have cb->skb == NULL */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}
1879
/* Tear down the CB ring: unmap and free any skbs still attached to
 * unreclaimed CBs, return the ring memory to the pool, and reset the
 * ring bookkeeping so a subsequent e100_alloc_cbs starts clean.
 */
static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		/* Walk until every entry has been accounted back */
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	/* Next ring must be (re)started with cuc_start */
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
1903
1904static int e100_alloc_cbs(struct nic *nic)
1905{
1906 struct cb *cb;
1907 unsigned int i, count = nic->params.cbs.count;
1908
1909 nic->cuc_cmd = cuc_start;
1910 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1911 nic->cbs_avail = 0;
1912
1913 nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
1914 &nic->cbs_dma_addr);
1915 if (!nic->cbs)
1916 return -ENOMEM;
1917 memset(nic->cbs, 0, count * sizeof(struct cb));
1918
1919 for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
1920 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1921 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1922
1923 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1924 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1925 ((i+1) % count) * sizeof(struct cb));
1926 }
1927
1928 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1929 nic->cbs_avail = count;
1930
1931 return 0;
1932}
1933
1934static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1935{
1936 if (!nic->rxs) return;
1937 if (RU_SUSPENDED != nic->ru_running) return;
1938
1939
1940 if (!rx) rx = nic->rxs;
1941
1942
1943 if (rx->skb) {
1944 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1945 nic->ru_running = RU_RUNNING;
1946 }
1947}
1948
/* Each Rx buffer holds the hardware RFD header followed by a
 * max-size VLAN Ethernet frame plus FCS */
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
/* Allocate and DMA-map one receive buffer, seed it with the blank RFD
 * template, and chain it onto the previous RFD's hardware link field.
 * Returns 0 or -ENOMEM (allocation or mapping failure).
 */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link this RFD into the RFA by pointing the previous RFD's
	 * link at it, then sync that link back to the device.  The
	 * unaligned helper is used because the RFD sits in skb data
	 * with no alignment guarantee. */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}
1979
/* Examine one receive buffer and, if the hardware has completed it,
 * strip the RFD header and hand the frame to the stack.
 *
 * Returns 0 when a frame was consumed, -EAGAIN when the NAPI budget is
 * exhausted, or -ENODATA when the RFD is not yet complete (also noting
 * whether the RU suspended on this end-of-list RFD).
 */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;
	u16 fcs_pad = 0;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);
	/* Order the status read before the rest of the RFD fields */
	dma_rmb();

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the
		 * receiver is still running, check to see if it really
		 * stopped while we had interrupts off */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))

			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		/* Hand the RFD back to the device untouched */
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* When RXFCS is on, the FCS is delivered as 4 extra bytes */
	if (unlikely(dev->features & NETIF_F_RXFCS))
		fcs_pad = 4;
	/* Low 14 bits of actual_size are the byte count; clamp to the
	 * buffer in case the hardware reports something bigger */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

	    if (ioread8(&nic->csr->scb.status) & rus_no_res)
		nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	/* In RXALL mode even bad frames are delivered up the stack */
	if (unlikely(dev->features & NETIF_F_RXALL)) {
		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
			/* Received oversized frame, but keep it. */
			nic->rx_over_length_errors++;
		goto process_skb;
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
process_skb:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	/* Buffer consumed; rx_clean will allocate a replacement */
	rx->skb = NULL;

	return 0;
}
2078
/* NAPI receive path: indicate all completed RFDs (bounded by
 * work_to_do), refill the ring, move the end-of-list (el) marker from
 * the old before-last RFD to the new one, and restart the RU if it
 * suspended.  The el marker keeps the hardware from running off the
 * end of the ring while the driver refills it.
 */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we restarting the RU?  Restart only if no more work
	 * remains, so restart and indication can't race */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last
		 * buffer.  This lets us update the next pointer on the
		 * last buffer without worrying about hardware touching it.
		 * The size of zero keeps the hardware from reusing the
		 * marker RFD for data. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, remove the old
		 * one.  Command is cleared first, synced, then size is
		 * restored and synced again — presumably so the device
		 * never observes a usable size with the el bit still set */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		/* ack the rnr? */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
2155
2156static void e100_rx_clean_list(struct nic *nic)
2157{
2158 struct rx *rx;
2159 unsigned int i, count = nic->params.rfds.count;
2160
2161 nic->ru_running = RU_UNINITIALIZED;
2162
2163 if (nic->rxs) {
2164 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2165 if (rx->skb) {
2166 pci_unmap_single(nic->pdev, rx->dma_addr,
2167 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2168 dev_kfree_skb(rx->skb);
2169 }
2170 }
2171 kfree(nic->rxs);
2172 nic->rxs = NULL;
2173 }
2174
2175 nic->rx_to_use = nic->rx_to_clean = NULL;
2176}
2177
/* Build the receive ring: allocate the circular rx descriptor array,
 * attach a mapped skb/RFD to each entry, and mark the before-last RFD
 * as end-of-list (el, size 0) so the hardware stops there while the
 * driver refills.  Leaves the RU in RU_SUSPENDED, ready for
 * e100_start_receiver().  Returns 0 or -ENOMEM (fully cleaned up).
 */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer
	 * without worrying about hardware touching it.
	 * The zero size keeps the marker RFD from being used for data. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
2217
/* Interrupt handler: read and acknowledge the SCB status, note an RU
 * "no resources" suspension, and hand further processing to NAPI with
 * device interrupts disabled.  Returns IRQ_NONE for shared-line
 * interrupts that aren't ours.
 */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	    stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) by writing the status bits back */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if (likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
2245
2246static int e100_poll(struct napi_struct *napi, int budget)
2247{
2248 struct nic *nic = container_of(napi, struct nic, napi);
2249 unsigned int work_done = 0;
2250
2251 e100_rx_clean(nic, &work_done, budget);
2252 e100_tx_clean(nic);
2253
2254
2255 if (work_done < budget) {
2256 napi_complete_done(napi, work_done);
2257 e100_enable_irq(nic);
2258 }
2259
2260 return work_done;
2261}
2262
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook (netconsole etc.): run the interrupt handler and Tx
 * reaping with device interrupts masked, then unmask. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif
2274
2275static int e100_set_mac_address(struct net_device *netdev, void *p)
2276{
2277 struct nic *nic = netdev_priv(netdev);
2278 struct sockaddr *addr = p;
2279
2280 if (!is_valid_ether_addr(addr->sa_data))
2281 return -EADDRNOTAVAIL;
2282
2283 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2284 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2285
2286 return 0;
2287}
2288
2289static int e100_asf(struct nic *nic)
2290{
2291
2292 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2293 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2294 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2295 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
2296}
2297
/* Bring the interface up: allocate Rx and CB rings, initialize the
 * hardware, start the receiver and watchdog, request the IRQ, then
 * enable the queue, NAPI and interrupts.  On failure, unwinds in
 * reverse order via the goto ladder and returns the errno.
 */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
2329
/* Take the interface down, reversing e100_up: stop NAPI and the queue,
 * reset the hardware, free the IRQ, stop the watchdog, drop carrier,
 * and free both rings.  Teardown order mirrors bring-up.
 */
static void e100_down(struct nic *nic)
{
	/* wait for previous poll() to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
2342
/* ndo_tx_timeout hook: defer the recovery to process context, since it
 * needs rtnl_lock and may sleep (see e100_tx_timeout_task). */
static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
2351
2352static void e100_tx_timeout_task(struct work_struct *work)
2353{
2354 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2355 struct net_device *netdev = nic->netdev;
2356
2357 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
2358 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
2359
2360 rtnl_lock();
2361 if (netif_running(netdev)) {
2362 e100_down(netdev_priv(netdev));
2363 e100_up(netdev_priv(netdev));
2364 }
2365 rtnl_unlock();
2366}
2367
/* Self-contained loopback test: build temporary rings, put the device
 * (or PHY) in loopback, transmit one all-0xFF frame, and verify it
 * arrives back intact.  Returns 0 on success, -EAGAIN on data
 * mismatch, or an allocation/init errno.  Must be called with the
 * interface down (rings are rebuilt here).
 */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */
	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* Give the frame time to loop back */
	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* Compare payload past the RFD header against what was sent */
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
2423
/* PHY LED control: generic register at 0x1B; the 82552 uses its own
 * override register and on/off values */
#define MII_LED_CONTROL 0x1B
#define E100_82552_LED_OVERRIDE 0x19
#define E100_82552_LED_ON 0x000F
#define E100_82552_LED_OFF 0x000A
2428
/* ethtool get_link_ksettings: delegate entirely to the mii library. */
static int e100_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct nic *nic = netdev_priv(netdev);

	mii_ethtool_get_link_ksettings(&nic->mii, cmd);

	return 0;
}
2438
/* ethtool set_link_ksettings: reset the PHY, apply the new settings
 * via the mii library, then push a fresh configure block so the MAC
 * matches the PHY.  Returns the mii library's result. */
static int e100_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_set_link_ksettings(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}
2451
/* ethtool get_drvinfo: report driver name, version and PCI bus info. */
static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev),
		sizeof(info->bus_info));
}
2461
/* Number of PHY registers exposed via ethtool get_regs */
#define E100_PHY_REGS 0x1C
/* ethtool get_regs_len: one SCB word + the PHY registers + the
 * hardware dump buffer (layout must match e100_get_regs). */
static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
}
2468
/* ethtool get_regs: pack the SCB command/status bytes into buff[0],
 * the PHY registers (highest address first) into buff[1..], then run
 * a hardware dump and append its buffer.  Layout and sizes must agree
 * with e100_get_regs_len. */
static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	/* Give the hardware time to DMA the dump into mem->dump_buf */
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}
2489
2490static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2491{
2492 struct nic *nic = netdev_priv(netdev);
2493 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2494 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2495}
2496
2497static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2498{
2499 struct nic *nic = netdev_priv(netdev);
2500
2501 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2502 !device_can_wakeup(&nic->pdev->dev))
2503 return -EOPNOTSUPP;
2504
2505 if (wol->wolopts)
2506 nic->flags |= wol_magic;
2507 else
2508 nic->flags &= ~wol_magic;
2509
2510 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2511
2512 e100_exec_cb(nic, NULL, e100_configure);
2513
2514 return 0;
2515}
2516
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}
2522
/* ethtool set_msglevel: set the driver's message-enable bitmask. */
static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}
2528
/* ethtool nway_reset: restart autonegotiation via the mii library. */
static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}
2534
/* ethtool get_link: report current link state from the PHY. */
static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}
2540
/* ethtool get_eeprom_len: eeprom_wc is the word count, so shift left
 * by one to report the size in bytes. */
static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}
2546
/* Magic cookie that must accompany ethtool EEPROM writes */
#define E100_EEPROM_MAGIC 0x1234
/* ethtool get_eeprom: copy from the driver's cached EEPROM image.
 * offset/len are pre-validated by the ethtool core against
 * e100_get_eeprom_len. */
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}
2558
/* ethtool set_eeprom: update the cached image, then write the touched
 * word range (byte offsets/lengths rounded out to whole words) back to
 * the physical EEPROM.  Requires the driver's magic cookie. */
static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
2572
2573static void e100_get_ringparam(struct net_device *netdev,
2574 struct ethtool_ringparam *ring)
2575{
2576 struct nic *nic = netdev_priv(netdev);
2577 struct param_range *rfds = &nic->params.rfds;
2578 struct param_range *cbs = &nic->params.cbs;
2579
2580 ring->rx_max_pending = rfds->max;
2581 ring->tx_max_pending = cbs->max;
2582 ring->rx_pending = rfds->count;
2583 ring->tx_pending = cbs->count;
2584}
2585
/* ethtool set_ringparam: clamp the requested Rx/Tx ring sizes to the
 * driver's min/max ranges and rebuild the rings by bouncing the
 * interface when it is running.  Mini/jumbo rings are not supported. */
static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	/* clamp to [min, max] */
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
2609
/* Names for the ethtool self-test results; order must match the data[]
 * slots filled in e100_diag_test */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
2618
/* ethtool self-test: always run the link and EEPROM checks; when
 * offline testing is requested, also run the hardware self-test and
 * MAC/PHY loopback tests, saving and restoring the link settings
 * around them.  Nonzero data[] entries mark failures. */
static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);
		/* NOTE(review): err is assigned but never checked here
		 * or below — presumably intentional best-effort; confirm */
		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* Allow time for the link to settle after the offline resets */
	msleep_interruptible(4 * 1000);
}
2651
/* ethtool .set_phys_id: identify the adapter by blinking its LED.
 *
 * ETHTOOL_ID_ACTIVE returns 2, asking the ethtool core to call back
 * with ID_ON/ID_OFF twice per second; each callback writes the LED
 * override value to the PHY LED control register.  The 82552 PHY has
 * its own register and on/off values; older parts use MII_LED_CONTROL
 * with values that differ pre/post 82559 (D101M).  ETHTOOL_ID_INACTIVE
 * writes 0, which returns the LED to normal operation.
 */
static int e100_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct nic *nic = netdev_priv(netdev);
	enum led_state {
		led_on = 0x01,
		led_off = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		MII_LED_CONTROL;
	u16 leds = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2; /* 2 toggles/sec via periodic ID_ON/ID_OFF calls */

	case ETHTOOL_ID_ON:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;

	case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;

	case ETHTOOL_ID_INACTIVE:
		break; /* leds == 0 restores normal LED behavior */
	}

	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
	return 0;
}
2686
/* Statistic names for ETH_SS_STATS.  The first E100_NET_STATS_LEN (21)
 * entries mirror struct net_device_stats counters in declaration order
 * (copied verbatim by e100_get_ethtool_stats); the remainder are
 * driver-private counters kept in struct nic. */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
#define E100_NET_STATS_LEN 21
#define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
2702
2703static int e100_get_sset_count(struct net_device *netdev, int sset)
2704{
2705 switch (sset) {
2706 case ETH_SS_TEST:
2707 return E100_TEST_LEN;
2708 case ETH_SS_STATS:
2709 return E100_STATS_LEN;
2710 default:
2711 return -EOPNOTSUPP;
2712 }
2713}
2714
/* ethtool .get_ethtool_stats: fill data[] in e100_gstrings_stats order.
 *
 * The first E100_NET_STATS_LEN entries are copied straight out of
 * netdev->stats by treating the struct as an array of unsigned long.
 * NOTE(review): this assumes struct net_device_stats is exactly a
 * sequence of unsigned long counters in declaration order matching the
 * string table — confirm if that struct's layout ever changes.
 * The remaining entries are driver-private counters from struct nic.
 */
static void e100_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}
2735
2736static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2737{
2738 switch (stringset) {
2739 case ETH_SS_TEST:
2740 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2741 break;
2742 case ETH_SS_STATS:
2743 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2744 break;
2745 }
2746}
2747
/* ethtool entry points implemented by this driver */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_drvinfo = e100_get_drvinfo,
	.get_regs_len = e100_get_regs_len,
	.get_regs = e100_get_regs,
	.get_wol = e100_get_wol,
	.set_wol = e100_set_wol,
	.get_msglevel = e100_get_msglevel,
	.set_msglevel = e100_set_msglevel,
	.nway_reset = e100_nway_reset,
	.get_link = e100_get_link,
	.get_eeprom_len = e100_get_eeprom_len,
	.get_eeprom = e100_get_eeprom,
	.set_eeprom = e100_set_eeprom,
	.get_ringparam = e100_get_ringparam,
	.set_ringparam = e100_set_ringparam,
	.self_test = e100_diag_test,
	.get_strings = e100_get_strings,
	.set_phys_id = e100_set_phys_id,
	.get_ethtool_stats = e100_get_ethtool_stats,
	.get_sset_count = e100_get_sset_count,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = e100_get_link_ksettings,
	.set_link_ksettings = e100_set_link_ksettings,
};
2772
2773static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2774{
2775 struct nic *nic = netdev_priv(netdev);
2776
2777 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2778}
2779
2780static int e100_alloc(struct nic *nic)
2781{
2782 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2783 &nic->dma_addr);
2784 return nic->mem ? 0 : -ENOMEM;
2785}
2786
2787static void e100_free(struct nic *nic)
2788{
2789 if (nic->mem) {
2790 pci_free_consistent(nic->pdev, sizeof(struct mem),
2791 nic->mem, nic->dma_addr);
2792 nic->mem = NULL;
2793 }
2794}
2795
2796static int e100_open(struct net_device *netdev)
2797{
2798 struct nic *nic = netdev_priv(netdev);
2799 int err = 0;
2800
2801 netif_carrier_off(netdev);
2802 if ((err = e100_up(nic)))
2803 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2804 return err;
2805}
2806
/* ndo_stop: quiesce the hardware and tear down the rings. */
static int e100_close(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_down(nic);
	return 0;
}
2812
/* ndo_set_features: only RXFCS/RXALL toggles require hardware action;
 * anything else returns without touching the device. */
static int e100_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct nic *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	/* re-issue the CONFIGURE command so the new settings reach HW */
	e100_exec_cb(nic, NULL, e100_configure);
	return 0;
}
2826
/* net_device entry points implemented by this driver */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open = e100_open,
	.ndo_stop = e100_close,
	.ndo_start_xmit = e100_xmit_frame,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = e100_set_multicast_list,
	.ndo_set_mac_address = e100_set_mac_address,
	.ndo_do_ioctl = e100_do_ioctl,
	.ndo_tx_timeout = e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e100_netpoll,
#endif
	.ndo_set_features = e100_set_features,
};
2841
/* PCI probe for one 8255x device.
 *
 * Allocates the net_device with struct nic as private data, enables and
 * maps the PCI device (memory BAR 0, or I/O BAR 1 when use_io is set),
 * resets the controller, loads defaults and the EEPROM, validates the
 * MAC address, registers the netdev and creates the command-block DMA
 * pool.  Returns 0 or a negative errno; partially acquired resources
 * are released via the goto unwind chain at the bottom.
 */
static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;

	/* hardware can keep the FCS and pass frames without one (RXALL) */
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->netdev_ops = &e100_netdev_ops;
	netdev->ethtool_ops = &e100_ethtool_ops;
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	/* temporary name for log messages; replaced by "eth%d" below */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	/* BAR 1 is the I/O mapping of the CSRs, BAR 0 the MMIO mapping */
	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	/* driver_data in the id table flags the ICH variants */
	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* pre-82558 A4 MACs can't accept the longer VLAN frames */
	if (nic->mac < mac_82558_D101_A4)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case the device is
	 * in some funky state and has an interrupt pending */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	/* MAC address occupies the first words of the EEPROM image */
	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* WoL magic packet support is advertised by an EEPROM id bit */
	if ((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
		goto err_out_free;
	}
	/* NOTE(review): the pool is created after register_netdev(), so
	 * ndo_open could in principle run before cbs_pool exists — confirm
	 * whether pool creation should precede registration. */
	nic->cbs_pool = pci_pool_create(netdev->name,
			   nic->pdev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
	if (!nic->cbs_pool) {
		netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
		err = -ENOMEM;
		goto err_out_pool;
	}
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	return 0;

err_out_pool:
	unregister_netdev(netdev);
err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	free_netdev(netdev);
	return err;
}
2995
/* PCI remove: undo everything e100_probe() set up.  drvdata may be
 * NULL if probe failed before pci_set_drvdata took effect. */
static void e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
3011
/* 82552 PHY SmartSpeed register and its control bits */
#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG 0x0200 /* reverse auto-negotiation */
#define E100_82552_ANEG_NOW 0x0400 /* auto-negotiate now */
/* Common suspend/shutdown path: stop the NIC, save PCI config space
 * and report via *enable_wake whether wake support (WoL magic packet
 * or ASF) should stay armed for the caller to act on. */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	/* bitwise '|' here behaves like '||' since both operands are
	 * zero/non-zero flags; both sides are always evaluated */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation on the 82552 PHY */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_clear_master(pdev);
}
3043
3044static int __e100_power_off(struct pci_dev *pdev, bool wake)
3045{
3046 if (wake)
3047 return pci_prepare_to_sleep(pdev);
3048
3049 pci_wake_from_d3(pdev, false);
3050 pci_set_power_state(pdev, PCI_D3hot);
3051
3052 return 0;
3053}
3054
3055#ifdef CONFIG_PM
3056static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
3057{
3058 bool wake;
3059 __e100_shutdown(pdev, &wake);
3060 return __e100_power_off(pdev, wake);
3061}
3062
/* Legacy PCI PM resume: restore power/config state, undo the reverse
 * auto-negotiation armed at suspend, and bring the interface back up
 * if it was running. */
static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* disable reverse auto-negotiation on the 82552 PHY */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
3089#endif
3090
3091static void e100_shutdown(struct pci_dev *pdev)
3092{
3093 bool wake;
3094 __e100_shutdown(pdev, &wake);
3095 if (system_state == SYSTEM_POWER_OFF)
3096 __e100_power_off(pdev, wake);
3097}
3098
3099
3100
3101
3102
3103
3104
/**
 * e100_io_error_detected - PCI error-recovery callback
 * @pdev: PCI device
 * @state: current PCI channel state
 *
 * Detaches the netdev and stops the NIC.  Permanent failure means
 * recovery is impossible (DISCONNECT); otherwise ask for a slot reset.
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
3122
3123
3124
3125
3126
3127
3128
/**
 * e100_io_slot_reset - called after the PCI slot has been reset
 * @pdev: PCI device
 *
 * Re-enables and re-initializes the device.  Only function 0 performs
 * the hardware reset/PHY init; other functions just report recovered.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
3148
3149
3150
3151
3152
3153
3154
3155
/**
 * e100_io_resume - resume normal operation after error recovery
 * @pdev: PCI device
 *
 * Re-attaches the netdev and, if it was running, reopens the interface
 * and kicks the watchdog.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}
3170
/* PCI error-recovery (AER) callbacks */
static const struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
3176
/* PCI driver registration for all supported 8255x devices */
static struct pci_driver e100_driver = {
	.name = DRV_NAME,
	.id_table = e100_id_table,
	.probe = e100_probe,
	.remove = e100_remove,
#ifdef CONFIG_PM
	/* legacy PCI power-management hooks (not dev_pm_ops) */
	.suspend = e100_suspend,
	.resume = e100_resume,
#endif
	.shutdown = e100_shutdown,
	.err_handler = &e100_err_handler,
};
3190
3191static int __init e100_init_module(void)
3192{
3193 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3194 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3195 pr_info("%s\n", DRV_COPYRIGHT);
3196 }
3197 return pci_register_driver(&e100_driver);
3198}
3199
/* Module exit: unregister the PCI driver, detaching all devices. */
static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}
3204
3205module_init(e100_init_module);
3206module_exit(e100_cleanup_module);
3207