/*
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE servers.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mdio.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/tcp.h>
#include <net/checksum.h>

#include <asm/div64.h>
#include <asm/irq.h>

#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.26.28"

/* S2io driver name & version. */
static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;

/* RxD size and count for the two receive modes (1-buffer and 3-buffer). */
static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};

static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 0x600B, 0x600C, 0x600D, 0x640B, 0x640C and 0x640D.
 * The macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(dev_type == XFRAME_I_DEVICE) ?					\
	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))

static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}

/* Ethtool related variables and Macros. */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};

static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};

static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};

#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN		ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)

/* Store a MAC address read as a 64-bit value into def_mac_addr[offset]. */
static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
{
	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
}
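/*
 * Constants to be programmed into the Xena's registers to configure
 * the XAUI.
 */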
#define END_SIGN 0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
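/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */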
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
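/* Module Loadable parameters. */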
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size (64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);

/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static const struct pci_device_id s2io_tbl[] = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static const struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = s2io_rem_nic,
	.err_handler = &s2io_err_handler,
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)

/* netif queue helpers: when multiqueue is disabled, the per-fifo queue
 * state is tracked in software alongside the single netdev queue.
 */
static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
{
	if (!sp->config.multiq)
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_STOP;

	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_start_all_queues(sp->dev);
}

static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_wake_all_queues(sp->dev);
}

static inline void s2io_wake_tx_queue(
	struct fifo_info *fifo, int cnt, u8 multiq)
{
	if (multiq) {
		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(fifo->dev)) {
			fifo->queue_state = FIFO_QUEUE_START;
			netif_wake_queue(fifo->dev);
		}
	}
}
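/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the Rx and Tx descriptors. The Tx descriptor lists are
 * carved out of page-sized DMA buffers; the Rx descriptors are
 * arranged as chained blocks.
 * Return value: SUCCESS on success, or an appropriate error code on
 * failure.
 */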
static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
						   &tmp_p, GFP_KERNEL);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "dma_alloc_coherent failed for TxDL\n");
				return -ENOMEM;
			}
			/*
			 * If we got a zero DMA address (can happen on
			 * certain platforms), reallocate and hold on to
			 * the virtual address of the unwanted page, to be
			 * freed later in free_shared_mem().
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = dma_alloc_coherent(&nic->pdev->dev,
							   PAGE_SIZE, &tmp_p,
							   GFP_KERNEL);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "dma_alloc_coherent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
							&tmp_p_addr, GFP_KERNEL);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;

			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		dma_alloc_coherent(&nic->pdev->dev, size,
				   &mac_control->stats_mem_phy, GFP_KERNEL);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called,
		 * which should free any memory that was alloced till
		 * the failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
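/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */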
static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	swstats = &stats->sw_stat;

	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			if (!fli->list_virt_addr)
				break;
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  fli->list_virt_addr,
					  fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  mac_control->zerodma_virt_addr,
					  (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
					  tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
				  mac_control->stats_mem,
				  mac_control->stats_mem_phy);
	}
}
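/*
 * s2io_verify_pci_mode - Read the PCI/PCI-X mode the card operates in.
 * Returns the bus mode on success, or -1 if the mode is unknown.
 */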
static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
	return mode;
}

#define NEC_VENID	0x1033
#define NEC_DEVID	0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	for_each_pci_dev(tdev) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);
				return 1;
			}
		}
	}
	return 0;
}

/* Bus speed, in MHz, for each PCI mode reported by GET_PCI_MODE(). */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
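/**
 * s2io_print_pci_mode - prints the PCI bus width and speed.
 * @nic: Device private variable.
 * Description: Reads the PCI mode register, stores the bus speed in the
 * device configuration and logs the bus width and mode.
 */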
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;
	const char *pcimode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		pcimode = "33MHz PCI bus";
		break;
	case PCI_MODE_PCI_66:
		pcimode = "66MHz PCI bus";
		break;
	case PCI_MODE_PCIX_M1_66:
		pcimode = "66MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_100:
		pcimode = "100MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_133:
		pcimode = "133MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M2_66:
		pcimode = "133MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_100:
		pcimode = "200MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_133:
		pcimode = "266MHz PCIX(M2) bus";
		break;
	default:
		pcimode = "unsupported bus!";
		mode = -1;
	}

	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);

	return mode;
}
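/**
 * init_tti - Initialization of the transmit traffic interrupt scheme
 * @nic: device private variable
 * @link: link status (UP/DOWN) used to enable/disable continuous
 * transmit interrupts
 * Description: The function configures the transmit traffic interrupts.
 * Return Value: SUCCESS on success and FAILURE on failure.
 */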
static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
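/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and an appropriate error code on
 * failure (e.g. if the endian settings are incorrect).
 */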
static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS,
	 * so soft reset is performed again with the EOI bits set.
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/* Configure the XAUI interface via the DTX control register */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* necessary delay */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);

		/* If the last fifo is at an even index, advance i so the
		 * switch below flushes the partially filled partition
		 * register.
		 */
		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
		TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL |
		TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable Tx FIFO partition */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/*
	 * Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 the user has not specified a
		 * frame length for this ring, so leave the default
		 * programmed above; otherwise program the user value.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG,
				  "%s: rts_ds_steer failed on codepoint %d\n",
				  dev->name, i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* TTI Initialization */
	if (SUCCESS != init_tti(nic, nic->last_link_state))
		return -ENODEV;

	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate approx. 500 interrupts per
		 * second
		 */
		int count = (nic->config.bus_speed * 125)/4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		RTI_DATA1_MEM_RX_URNG_B(0x10) |
		RTI_DATA1_MEM_RX_URNG_C(0x30) |
		RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
	if (nic->config.intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
			  RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
			  RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE |
			RTI_CMD_MEM_STROBE_NEW_CMD |
			RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete, if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (true) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;

			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
					  dev->name);
				return -ENODEV;
			}
			time++;
			msleep(50);
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any Queue exceeds the ratio
	 * (mc_pause_threshold_q0q3 or q4q7)/256, a pause frame
	 * is generated.
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q0q3)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q4q7)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2

static int s2io_link_fault_indication(struct s2io_nic *nic)
{
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	else
		return MAC_RMAC_ERR_TIMER;
}
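/*
 * do_s2io_write_bits - update interrupt mask bits at the given address.
 * Clears the bits to enable the corresponding interrupts, or sets them
 * to disable (flag is ENABLE_INTRS or DISABLE_INTRS).
 */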
static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
{
	u64 temp64;

	temp64 = readq(addr);

	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)value);
	else
		temp64 |= ((u64)value);
	writeq(temp64, addr);
}

static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
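/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */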
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	intr_mask = nic->general_int_mask;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/*  Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts;
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	nic->general_int_mask = readq(&bar0->general_int_mask);
}
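/**
 * verify_pcc_quiescent - Checks for PCC quiescent state
 * @sp: device private variable
 * @flag: boolean controlling whether to check for idle or non-idle
 * Return: 1 if the PCC is in the requested state, 0 otherwise.
 */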
static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
{
	int ret = 0, herc;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);

	herc = (sp->device_type == XFRAME_II_DEVICE);

	if (flag == false) {
		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	} else {
		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	}

	return ret;
}
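/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * @sp: device private variable
 * Description: Returns whether the H/W is ready to go or not by checking
 * the readiness bits reported in the adapter_status register.
 * Return: 1 if the adapter is quiescent, 0 otherwise.
 */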
static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE &&
	    mode != PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
		return 0;
	}
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
		return 0;
	}
	return 1;
}
2182static void fix_mac_address(struct s2io_nic *sp)
2183{
2184 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2185 int i = 0;
2186
2187 while (fix_mac[i] != END_SIGN) {
2188 writeq(fix_mac[i++], &bar0->gpio_control);
2189 udelay(10);
2190 (void) readq(&bar0->gpio_control);
2191 }
2192}
2193
/**
 * start_nic - Turns the device on.
 * @nic: device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all registers are configured from their reset states and
 * shared memory is allocated, but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC
 * is literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and FAILURE on failure.
 */
2207static int start_nic(struct s2io_nic *nic)
2208{
2209 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2210 struct net_device *dev = nic->dev;
2211 register u64 val64 = 0;
2212 u16 subid, i;
2213 struct config_param *config = &nic->config;
2214 struct mac_info *mac_control = &nic->mac_control;

	/* PRC Initialization and configuration */
2217 for (i = 0; i < config->rx_ring_num; i++) {
2218 struct ring_info *ring = &mac_control->rings[i];
2219
2220 writeq((u64)ring->rx_blocks[0].block_dma_addr,
2221 &bar0->prc_rxd0_n[i]);
2222
2223 val64 = readq(&bar0->prc_ctrl_n[i]);
2224 if (nic->rxd_mode == RXD_MODE_1)
2225 val64 |= PRC_CTRL_RC_ENABLED;
2226 else
2227 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2228 if (nic->device_type == XFRAME_II_DEVICE)
2229 val64 |= PRC_CTRL_GROUP_READS;
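		/*
		 * Reprogram the RxD fetch backoff interval: clear the old
		 * field and set it to 0x1000 (assumption: the field is in
		 * adapter clock cycles).
		 */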
2230 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2231 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2232 writeq(val64, &bar0->prc_ctrl_n[i]);
2233 }
2234
2235 if (nic->rxd_mode == RXD_MODE_3B) {
		/* In 2-buffer mode, tolerate L2 (frame) errors on receive */
2237 val64 = readq(&bar0->rx_pa_cfg);
2238 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2239 writeq(val64, &bar0->rx_pa_cfg);
2240 }
2241
2242 if (vlan_tag_strip == 0) {
2243 val64 = readq(&bar0->rx_pa_cfg);
2244 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2245 writeq(val64, &bar0->rx_pa_cfg);
2246 nic->vlan_strip_flag = 0;
2247 }
2248
	/*
	 * Enabling MC-RLDRAM. After enabling the device, we sleep
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
2254 val64 = readq(&bar0->mc_rldram_mrs);
2255 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2256 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2257 val64 = readq(&bar0->mc_rldram_mrs);
2258
2259 msleep(100);

	/* Enabling ECC Protection. */
2262 val64 = readq(&bar0->adapter_control);
2263 val64 &= ~ADAPTER_ECC_EN;
2264 writeq(val64, &bar0->adapter_control);
2265
	/*
	 * Verify if the device is ready to be enabled; if so, enable it.
	 */
2270 val64 = readq(&bar0->adapter_status);
2271 if (!verify_xena_quiescence(nic)) {
2272 DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2273 "Adapter status reads: 0x%llx\n",
2274 dev->name, (unsigned long long)val64);
2275 return FAILURE;
2276 }
2277
	/*
	 * With some switches, link might already be up at this point.
	 * Because of this weird behavior, when we enable the laser we
	 * may not get link. We cannot figure out which switch is
	 * misbehaving, so we are forced to make a global change.
	 */

	/* Enabling Laser. */
2287 val64 = readq(&bar0->adapter_control);
2288 val64 |= ADAPTER_EOI_TX_ON;
2289 writeq(val64, &bar0->adapter_control);
2290
2291 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Link state interrupts are not seen on some switches
		 * initially, so directly schedule the link state task here.
		 */
2296 schedule_work(&nic->set_link_task);
2297 }

	/* SXE-002: Initialize link and activity LED */
2299 subid = nic->pdev->subsystem_device;
2300 if (((subid & 0xFF) >= 0x07) &&
2301 (nic->device_type == XFRAME_I_DEVICE)) {
2302 val64 = readq(&bar0->gpio_control);
2303 val64 |= 0x0000800000000000ULL;
2304 writeq(val64, &bar0->gpio_control);
2305 val64 = 0x0411040400000000ULL;
2306 writeq(val64, (void __iomem *)bar0 + 0x2700);
2307 }
2308
2309 return SUCCESS;
2310}
2311
/**
 * s2io_txdl_getskb - Get the skb from a TxDL, unmap its buffers and
 * return the skb.
 * @fifo_data: fifo data pointer.
 * @txdlp: pointer to the first Tx descriptor of the list.
 * @get_off: offset of the descriptor list within the fifo.
 */
2317static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2318 struct TxD *txdlp, int get_off)
2319{
2320 struct s2io_nic *nic = fifo_data->nic;
2321 struct sk_buff *skb;
2322 struct TxD *txds;
2323 u16 j, frg_cnt;
2324
2325 txds = txdlp;
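	/*
	 * If UFO is in use, the first TxD carries an in-band control buffer
	 * rather than frame data; unmap it and advance to the descriptor
	 * that holds the skb.
	 */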
2326 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2327 dma_unmap_single(&nic->pdev->dev,
2328 (dma_addr_t)txds->Buffer_Pointer,
2329 sizeof(u64), DMA_TO_DEVICE);
2330 txds++;
2331 }
2332
2333 skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2334 if (!skb) {
2335 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2336 return NULL;
2337 }
2338 dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
2339 skb_headlen(skb), DMA_TO_DEVICE);
2340 frg_cnt = skb_shinfo(skb)->nr_frags;
2341 if (frg_cnt) {
2342 txds++;
2343 for (j = 0; j < frg_cnt; j++, txds++) {
2344 const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2345 if (!txds->Buffer_Pointer)
2346 break;
2347 dma_unmap_page(&nic->pdev->dev,
2348 (dma_addr_t)txds->Buffer_Pointer,
2349 skb_frag_size(frag), DMA_TO_DEVICE);
2350 }
2351 }
2352 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2353 return skb;
2354}
2355
/**
 * free_tx_buffers - Free all queued Tx buffers.
 * @nic: device private variable.
 * Description:
 * Frees all skbs still queued in the Tx fifos, typically when the
 * card is being brought down.
 * Return Value: void.
 */
2364static void free_tx_buffers(struct s2io_nic *nic)
2365{
2366 struct net_device *dev = nic->dev;
2367 struct sk_buff *skb;
2368 struct TxD *txdp;
2369 int i, j;
2370 int cnt = 0;
2371 struct config_param *config = &nic->config;
2372 struct mac_info *mac_control = &nic->mac_control;
2373 struct stat_block *stats = mac_control->stats_info;
2374 struct swStat *swstats = &stats->sw_stat;
2375
2376 for (i = 0; i < config->tx_fifo_num; i++) {
2377 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2378 struct fifo_info *fifo = &mac_control->fifos[i];
2379 unsigned long flags;
2380
2381 spin_lock_irqsave(&fifo->tx_lock, flags);
2382 for (j = 0; j < tx_cfg->fifo_len; j++) {
2383 txdp = fifo->list_info[j].list_virt_addr;
2384 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2385 if (skb) {
2386 swstats->mem_freed += skb->truesize;
2387 dev_kfree_skb(skb);
2388 cnt++;
2389 }
2390 }
2391 DBG_PRINT(INTR_DBG,
2392 "%s: forcibly freeing %d skbs on FIFO%d\n",
2393 dev->name, cnt, i);
2394 fifo->tx_curr_get_info.offset = 0;
2395 fifo->tx_curr_put_info.offset = 0;
2396 spin_unlock_irqrestore(&fifo->tx_lock, flags);
2397 }
2398}
2399
/**
 * stop_nic - To stop the nic.
 * @nic: device private variable.
 * Description:
 * This function does exactly the opposite of what the start_nic()
 * function does: it disables interrupts and clears the adapter
 * enable bit to stop the device.
 * Return Value: void.
 */
2410static void stop_nic(struct s2io_nic *nic)
2411{
2412 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2413 register u64 val64 = 0;
2414 u16 interruptible;
2415
	/* Disable all interrupts */
2417 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2418 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2419 interruptible |= TX_PIC_INTR;
2420 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2423 val64 = readq(&bar0->adapter_control);
2424 val64 &= ~(ADAPTER_CNTL_EN);
2425 writeq(val64, &bar0->adapter_control);
2426}
2427
/**
 * fill_rx_buffers - Allocates the Rx side skbs.
 * @nic: device private variable.
 * @ring: per-ring structure.
 * @from_card_up: if true, map the buffers to get the DMA addresses of
 * buf0 and buf1 to hand to the card; otherwise only sync the already
 * mapped buffers back to the card.
 * Description:
 * The function allocates Rx side skbs and puts the physical address of
 * these buffers into the RxD buffer pointers, so that the NIC can DMA
 * the received frame into them. The frame is split across buffers
 * according to the receive mode; only the 1-buffer and 2-buffer modes
 * are supported.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */
2451static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2452 int from_card_up)
2453{
2454 struct sk_buff *skb;
2455 struct RxD_t *rxdp;
2456 int off, size, block_no, block_no1;
2457 u32 alloc_tab = 0;
2458 u32 alloc_cnt;
2459 u64 tmp;
2460 struct buffAdd *ba;
2461 struct RxD_t *first_rxdp = NULL;
2462 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2463 struct RxD1 *rxdp1;
2464 struct RxD3 *rxdp3;
2465 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2466
2467 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2468
2469 block_no1 = ring->rx_curr_get_info.block_index;
2470 while (alloc_tab < alloc_cnt) {
2471 block_no = ring->rx_curr_put_info.block_index;
2472
2473 off = ring->rx_curr_put_info.offset;
2474
2475 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2476
2477 if ((block_no == block_no1) &&
2478 (off == ring->rx_curr_get_info.offset) &&
2479 (rxdp->Host_Control)) {
2480 DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2481 ring->dev->name);
2482 goto end;
2483 }
2484 if (off && (off == ring->rxd_count)) {
2485 ring->rx_curr_put_info.block_index++;
2486 if (ring->rx_curr_put_info.block_index ==
2487 ring->block_count)
2488 ring->rx_curr_put_info.block_index = 0;
2489 block_no = ring->rx_curr_put_info.block_index;
2490 off = 0;
2491 ring->rx_curr_put_info.offset = off;
2492 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2493 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2494 ring->dev->name, rxdp);
2495
2496 }

		/* In 2-buffer mode, stop if this descriptor is still owned by the NIC */
2498 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2499 ((ring->rxd_mode == RXD_MODE_3B) &&
2500 (rxdp->Control_2 & s2BIT(0)))) {
2501 ring->rx_curr_put_info.offset = off;
2502 goto end;
2503 }
2504
2505 size = ring->mtu +
2506 HEADER_ETHERNET_II_802_3_SIZE +
2507 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2508 if (ring->rxd_mode == RXD_MODE_1)
2509 size += NET_IP_ALIGN;
2510 else
2511 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2512
		/* Allocate the skb */
2514 skb = netdev_alloc_skb(nic->dev, size);
2515 if (!skb) {
2516 DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2517 ring->dev->name);
2518 if (first_rxdp) {
2519 dma_wmb();
2520 first_rxdp->Control_1 |= RXD_OWN_XENA;
2521 }
2522 swstats->mem_alloc_fail_cnt++;
2523
			return -ENOMEM;
2525 }
2526 swstats->mem_allocated += skb->truesize;
2527
2528 if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
2530 rxdp1 = (struct RxD1 *)rxdp;
2531 memset(rxdp, 0, sizeof(struct RxD1));
2532 skb_reserve(skb, NET_IP_ALIGN);
2533 rxdp1->Buffer0_ptr =
2534 dma_map_single(&ring->pdev->dev, skb->data,
2535 size - NET_IP_ALIGN,
2536 DMA_FROM_DEVICE);
2537 if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
2538 goto pci_map_failed;
2539
2540 rxdp->Control_2 =
2541 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2542 rxdp->Host_Control = (unsigned long)skb;
2543 } else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128 byte aligned
			 * receive buffers.
			 */
2550 rxdp3 = (struct RxD3 *)rxdp;
			/* save the buffer pointers to avoid frequent DMA mapping */
2552 Buffer0_ptr = rxdp3->Buffer0_ptr;
2553 Buffer1_ptr = rxdp3->Buffer1_ptr;
2554 memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for DMA sync */
2556 rxdp3->Buffer0_ptr = Buffer0_ptr;
2557 rxdp3->Buffer1_ptr = Buffer1_ptr;
2558
2559 ba = &ring->ba[block_no][off];
2560 skb_reserve(skb, BUF0_LEN);
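			/*
			 * Round skb->data up to the next (ALIGN_SIZE + 1)
			 * boundary; 2-buffer mode wants the payload buffer
			 * aligned (128 bytes, going by ALIGN_SIZE).
			 */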
2561 tmp = (u64)(unsigned long)skb->data;
2562 tmp += ALIGN_SIZE;
2563 tmp &= ~ALIGN_SIZE;
2564 skb->data = (void *) (unsigned long)tmp;
2565 skb_reset_tail_pointer(skb);
2566
2567 if (from_card_up) {
2568 rxdp3->Buffer0_ptr =
2569 dma_map_single(&ring->pdev->dev,
2570 ba->ba_0, BUF0_LEN,
2571 DMA_FROM_DEVICE);
2572 if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
2573 goto pci_map_failed;
2574 } else
2575 dma_sync_single_for_device(&ring->pdev->dev,
2576 (dma_addr_t)rxdp3->Buffer0_ptr,
2577 BUF0_LEN,
2578 DMA_FROM_DEVICE);
2579
2580 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2581 if (ring->rxd_mode == RXD_MODE_3B) {
				/*
				 * Buffer2 will receive the L3/L4 header plus
				 * the L4 payload.
				 */
2588 rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
2589 skb->data,
2590 ring->mtu + 4,
2591 DMA_FROM_DEVICE);
2592
2593 if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
2594 goto pci_map_failed;
2595
2596 if (from_card_up) {
2597 rxdp3->Buffer1_ptr =
2598 dma_map_single(&ring->pdev->dev,
2599 ba->ba_1,
2600 BUF1_LEN,
2601 DMA_FROM_DEVICE);
2602
2603 if (dma_mapping_error(&nic->pdev->dev,
2604 rxdp3->Buffer1_ptr)) {
2605 dma_unmap_single(&ring->pdev->dev,
2606 (dma_addr_t)(unsigned long)
2607 skb->data,
2608 ring->mtu + 4,
2609 DMA_FROM_DEVICE);
2610 goto pci_map_failed;
2611 }
2612 }
2613 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2614 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2615 (ring->mtu + 4);
2616 }
2617 rxdp->Control_2 |= s2BIT(0);
2618 rxdp->Host_Control = (unsigned long) (skb);
2619 }
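		/*
		 * Ownership is handed to the NIC in batches of
		 * 2^rxsync_frequency descriptors: all but the first of a
		 * batch get RXD_OWN_XENA immediately, while the first is
		 * deferred via first_rxdp until the whole batch is built.
		 */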
2620 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2621 rxdp->Control_1 |= RXD_OWN_XENA;
2622 off++;
2623 if (off == (ring->rxd_count + 1))
2624 off = 0;
2625 ring->rx_curr_put_info.offset = off;
2626
2627 rxdp->Control_2 |= SET_RXD_MARKER;
2628 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2629 if (first_rxdp) {
2630 dma_wmb();
2631 first_rxdp->Control_1 |= RXD_OWN_XENA;
2632 }
2633 first_rxdp = rxdp;
2634 }
2635 ring->rx_bufs_left += 1;
2636 alloc_tab++;
2637 }
2638
2639end:
	/*
	 * Transfer ownership of the first descriptor to the adapter just
	 * before exiting. Before that, use a memory barrier so that the
	 * ownership and other fields are seen correctly by the adapter.
	 */
2644 if (first_rxdp) {
2645 dma_wmb();
2646 first_rxdp->Control_1 |= RXD_OWN_XENA;
2647 }
2648
2649 return SUCCESS;
2650
2651pci_map_failed:
2652 swstats->pci_map_fail_cnt++;
2653 swstats->mem_freed += skb->truesize;
2654 dev_kfree_skb_irq(skb);
2655 return -ENOMEM;
2656}
2657
2658static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2659{
2660 struct net_device *dev = sp->dev;
2661 int j;
2662 struct sk_buff *skb;
2663 struct RxD_t *rxdp;
2664 struct RxD1 *rxdp1;
2665 struct RxD3 *rxdp3;
2666 struct mac_info *mac_control = &sp->mac_control;
2667 struct stat_block *stats = mac_control->stats_info;
2668 struct swStat *swstats = &stats->sw_stat;
2669
2670 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2671 rxdp = mac_control->rings[ring_no].
2672 rx_blocks[blk].rxds[j].virt_addr;
2673 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2674 if (!skb)
2675 continue;
2676 if (sp->rxd_mode == RXD_MODE_1) {
2677 rxdp1 = (struct RxD1 *)rxdp;
2678 dma_unmap_single(&sp->pdev->dev,
2679 (dma_addr_t)rxdp1->Buffer0_ptr,
2680 dev->mtu +
2681 HEADER_ETHERNET_II_802_3_SIZE +
2682 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2683 DMA_FROM_DEVICE);
2684 memset(rxdp, 0, sizeof(struct RxD1));
2685 } else if (sp->rxd_mode == RXD_MODE_3B) {
2686 rxdp3 = (struct RxD3 *)rxdp;
2687 dma_unmap_single(&sp->pdev->dev,
2688 (dma_addr_t)rxdp3->Buffer0_ptr,
2689 BUF0_LEN, DMA_FROM_DEVICE);
2690 dma_unmap_single(&sp->pdev->dev,
2691 (dma_addr_t)rxdp3->Buffer1_ptr,
2692 BUF1_LEN, DMA_FROM_DEVICE);
2693 dma_unmap_single(&sp->pdev->dev,
2694 (dma_addr_t)rxdp3->Buffer2_ptr,
2695 dev->mtu + 4, DMA_FROM_DEVICE);
2696 memset(rxdp, 0, sizeof(struct RxD3));
2697 }
2698 swstats->mem_freed += skb->truesize;
2699 dev_kfree_skb(skb);
2700 mac_control->rings[ring_no].rx_bufs_left -= 1;
2701 }
2702}
2703
2704
/**
 * free_rx_buffers - Frees all Rx buffers.
 * @sp: device private variable.
 * Description:
 * This function frees all the Rx buffers allocated by the host.
 * Return Value: NONE.
 */
2713static void free_rx_buffers(struct s2io_nic *sp)
2714{
2715 struct net_device *dev = sp->dev;
2716 int i, blk = 0, buf_cnt = 0;
2717 struct config_param *config = &sp->config;
2718 struct mac_info *mac_control = &sp->mac_control;
2719
2720 for (i = 0; i < config->rx_ring_num; i++) {
2721 struct ring_info *ring = &mac_control->rings[i];
2722
2723 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2724 free_rxd_blk(sp, i, blk);
2725
2726 ring->rx_curr_put_info.block_index = 0;
2727 ring->rx_curr_get_info.block_index = 0;
2728 ring->rx_curr_put_info.offset = 0;
2729 ring->rx_curr_get_info.offset = 0;
2730 ring->rx_bufs_left = 0;
2731 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2732 dev->name, buf_cnt, i);
2733 }
2734}
2735
2736static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2737{
2738 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2739 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2740 ring->dev->name);
2741 }
2742 return 0;
2743}
2744
2745
/**
 * s2io_poll_msix - Rx handler for NAPI support (MSI-X).
 * @napi: pointer to the napi structure of the ring.
 * @budget: the number of packets that may be processed during one pass
 * through the poll function.
 * Description:
 * Comes into play only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in interrupt
 * context, and it processes at most the budgeted number of packets.
 * Return value:
 * Number of packets processed.
 */
2758static int s2io_poll_msix(struct napi_struct *napi, int budget)
2759{
2760 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2761 struct net_device *dev = ring->dev;
2762 int pkts_processed = 0;
2763 u8 __iomem *addr = NULL;
2764 u8 val8 = 0;
2765 struct s2io_nic *nic = netdev_priv(dev);
2766 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2767 int budget_org = budget;
2768
2769 if (unlikely(!is_s2io_card_up(nic)))
2770 return 0;
2771
2772 pkts_processed = rx_intr_handler(ring, budget);
2773 s2io_chk_rx_buffers(nic, ring);
2774
2775 if (pkts_processed < budget_org) {
2776 napi_complete_done(napi, pkts_processed);
		/* Re-enable this ring's MSI-X Rx vector */
2778 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2779 addr += 7 - ring->ring_no;
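		/*
		 * Each ring's MSI-X vector is masked by one byte of
		 * xmsi_mask_reg (byte 7 - ring_no); writing val8 (0x3f for
		 * ring 0, 0xbf otherwise) re-arms this ring's vector.
		 */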
2780 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2781 writeb(val8, addr);
2782 val8 = readb(addr);
2783 }
2784 return pkts_processed;
2785}
2786
2787static int s2io_poll_inta(struct napi_struct *napi, int budget)
2788{
2789 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2790 int pkts_processed = 0;
2791 int ring_pkts_processed, i;
2792 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2793 int budget_org = budget;
2794 struct config_param *config = &nic->config;
2795 struct mac_info *mac_control = &nic->mac_control;
2796
2797 if (unlikely(!is_s2io_card_up(nic)))
2798 return 0;
2799
2800 for (i = 0; i < config->rx_ring_num; i++) {
2801 struct ring_info *ring = &mac_control->rings[i];
2802 ring_pkts_processed = rx_intr_handler(ring, budget);
2803 s2io_chk_rx_buffers(nic, ring);
2804 pkts_processed += ring_pkts_processed;
2805 budget -= ring_pkts_processed;
2806 if (budget <= 0)
2807 break;
2808 }
2809 if (pkts_processed < budget_org) {
2810 napi_complete_done(napi, pkts_processed);
		/* Re-enable Rx traffic interrupts */
2812 writeq(0, &bar0->rx_traffic_mask);
2813 readl(&bar0->rx_traffic_mask);
2814 }
2815 return pkts_processed;
2816}
2817
2818#ifdef CONFIG_NET_POLL_CONTROLLER

/**
 * s2io_netpoll - netpoll event handler entry point.
 * @dev: pointer to the device structure.
 * Description:
 * This function will be called by the upper layer to check for events
 * on the interface in situations where interrupts are disabled. It is
 * used for specific in-kernel networking tasks, such as remote consoles
 * and kernel debugging over the network.
 */
2828static void s2io_netpoll(struct net_device *dev)
2829{
2830 struct s2io_nic *nic = netdev_priv(dev);
2831 const int irq = nic->pdev->irq;
2832 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2833 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2834 int i;
2835 struct config_param *config = &nic->config;
2836 struct mac_info *mac_control = &nic->mac_control;
2837
2838 if (pci_channel_offline(nic->pdev))
2839 return;
2840
2841 disable_irq(irq);
2842
2843 writeq(val64, &bar0->rx_traffic_int);
2844 writeq(val64, &bar0->tx_traffic_int);
2845
	/*
	 * We need to free up the transmitted skbs or else netpoll will run
	 * out of skbs and will fail, and eventually a netpoll application
	 * such as netdump will fail too.
	 */
2850 for (i = 0; i < config->tx_fifo_num; i++)
2851 tx_intr_handler(&mac_control->fifos[i]);
2852
	/* check for received packets and indicate them up to the stack */
2854 for (i = 0; i < config->rx_ring_num; i++) {
2855 struct ring_info *ring = &mac_control->rings[i];
2856
2857 rx_intr_handler(ring, 0);
2858 }
2859
2860 for (i = 0; i < config->rx_ring_num; i++) {
2861 struct ring_info *ring = &mac_control->rings[i];
2862
2863 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2864 DBG_PRINT(INFO_DBG,
2865 "%s: Out of memory in Rx Netpoll!!\n",
2866 dev->name);
2867 break;
2868 }
2869 }
2870 enable_irq(irq);
2871}
2872#endif
2873
2874
/**
 * rx_intr_handler - Rx interrupt handler.
 * @ring_data: per-ring structure.
 * @budget: budget for napi processing.
 * Description:
 * If the interrupt is due to a received frame, or if the receive ring
 * contains fresh, as yet unprocessed frames, this function is called.
 * It picks out the RxD at which the last Rx processing stopped, hands
 * each frame to the OSM's Rx handler and then increments the offset.
 * Return Value:
 * Number of napi packets processed.
 */
2887static int rx_intr_handler(struct ring_info *ring_data, int budget)
2888{
2889 int get_block, put_block;
2890 struct rx_curr_get_info get_info, put_info;
2891 struct RxD_t *rxdp;
2892 struct sk_buff *skb;
2893 int pkt_cnt = 0, napi_pkts = 0;
2894 int i;
2895 struct RxD1 *rxdp1;
2896 struct RxD3 *rxdp3;
2897
2898 if (budget <= 0)
2899 return napi_pkts;
2900
2901 get_info = ring_data->rx_curr_get_info;
2902 get_block = get_info.block_index;
2903 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2904 put_block = put_info.block_index;
2905 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2906
2907 while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If we are next to the put index, this is a ring-full
		 * condition.
		 */
2912 if ((get_block == put_block) &&
2913 (get_info.offset + 1) == put_info.offset) {
2914 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2915 ring_data->dev->name);
2916 break;
2917 }
2918 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2919 if (skb == NULL) {
2920 DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2921 ring_data->dev->name);
2922 return 0;
2923 }
2924 if (ring_data->rxd_mode == RXD_MODE_1) {
2925 rxdp1 = (struct RxD1 *)rxdp;
2926 dma_unmap_single(&ring_data->pdev->dev,
2927 (dma_addr_t)rxdp1->Buffer0_ptr,
2928 ring_data->mtu +
2929 HEADER_ETHERNET_II_802_3_SIZE +
2930 HEADER_802_2_SIZE +
2931 HEADER_SNAP_SIZE,
2932 DMA_FROM_DEVICE);
2933 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
2934 rxdp3 = (struct RxD3 *)rxdp;
2935 dma_sync_single_for_cpu(&ring_data->pdev->dev,
2936 (dma_addr_t)rxdp3->Buffer0_ptr,
2937 BUF0_LEN, DMA_FROM_DEVICE);
2938 dma_unmap_single(&ring_data->pdev->dev,
2939 (dma_addr_t)rxdp3->Buffer2_ptr,
2940 ring_data->mtu + 4, DMA_FROM_DEVICE);
2941 }
2942 prefetch(skb->data);
2943 rx_osm_handler(ring_data, rxdp);
2944 get_info.offset++;
2945 ring_data->rx_curr_get_info.offset = get_info.offset;
2946 rxdp = ring_data->rx_blocks[get_block].
2947 rxds[get_info.offset].virt_addr;
2948 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2949 get_info.offset = 0;
2950 ring_data->rx_curr_get_info.offset = get_info.offset;
2951 get_block++;
2952 if (get_block == ring_data->block_count)
2953 get_block = 0;
2954 ring_data->rx_curr_get_info.block_index = get_block;
2955 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2956 }
2957
2958 if (ring_data->nic->config.napi) {
2959 budget--;
2960 napi_pkts++;
2961 if (!budget)
2962 break;
2963 }
2964 pkt_cnt++;
2965 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2966 break;
2967 }
2968 if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
2970 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2971 struct lro *lro = &ring_data->lro0_n[i];
2972 if (lro->in_use) {
2973 update_L3L4_header(ring_data->nic, lro);
2974 queue_rx_frame(lro->parent, lro->vlan_tag);
2975 clear_lro_session(lro);
2976 }
2977 }
2978 }
2979 return napi_pkts;
2980}
2981
2982
/**
 * tx_intr_handler - Transmit interrupt handler.
 * @fifo_data: fifo data pointer.
 * Description:
 * If an interrupt was raised to indicate DMA completion of a Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data has already been DMA'ed into the
 * NIC's internal memory.
 * Return Value: NONE.
 */
2994static void tx_intr_handler(struct fifo_info *fifo_data)
2995{
2996 struct s2io_nic *nic = fifo_data->nic;
2997 struct tx_curr_get_info get_info, put_info;
2998 struct sk_buff *skb = NULL;
2999 struct TxD *txdlp;
3000 int pkt_cnt = 0;
3001 unsigned long flags = 0;
3002 u8 err_mask;
3003 struct stat_block *stats = nic->mac_control.stats_info;
3004 struct swStat *swstats = &stats->sw_stat;
3005
3006 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3007 return;
3008
3009 get_info = fifo_data->tx_curr_get_info;
3010 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3011 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3012 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3013 (get_info.offset != put_info.offset) &&
3014 (txdlp->Host_Control)) {
		/* Check for TxD errors */
3016 if (txdlp->Control_1 & TXD_T_CODE) {
3017 unsigned long long err;
3018 err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1)
				swstats->parity_err_cnt++;

			/* update t_code statistics */
3024 err_mask = err >> 48;
3025 switch (err_mask) {
3026 case 2:
3027 swstats->tx_buf_abort_cnt++;
3028 break;
3029
3030 case 3:
3031 swstats->tx_desc_abort_cnt++;
3032 break;
3033
3034 case 7:
3035 swstats->tx_parity_err_cnt++;
3036 break;
3037
3038 case 10:
3039 swstats->tx_link_loss_cnt++;
3040 break;
3041
3042 case 15:
3043 swstats->tx_list_proc_err_cnt++;
3044 break;
3045 }
3046 }
3047
3048 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3049 if (skb == NULL) {
3050 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3051 DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3052 __func__);
3053 return;
3054 }
3055 pkt_cnt++;

		/* Update the statistics block */
3058 swstats->mem_freed += skb->truesize;
3059 dev_consume_skb_irq(skb);
3060
3061 get_info.offset++;
3062 if (get_info.offset == get_info.fifo_len + 1)
3063 get_info.offset = 0;
3064 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3065 fifo_data->tx_curr_get_info.offset = get_info.offset;
3066 }
3067
3068 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3069
3070 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3071}
3072
3073
/**
 * s2io_mdio_write - Function to write to the MDIO registers.
 * @mmd_type: MMD type value (PMA/PMD/WIS/PCS/PHYXS).
 * @addr: address value.
 * @value: data value.
 * @dev: pointer to the net_device structure.
 * Description:
 * This function is used to write values into the MDIO registers.
 */
3083static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3084 struct net_device *dev)
3085{
3086 u64 val64;
3087 struct s2io_nic *sp = netdev_priv(dev);
3088 struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
3091 val64 = MDIO_MMD_INDX_ADDR(addr) |
3092 MDIO_MMD_DEV_ADDR(mmd_type) |
3093 MDIO_MMS_PRT_ADDR(0x0);
3094 writeq(val64, &bar0->mdio_control);
3095 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3096 writeq(val64, &bar0->mdio_control);
3097 udelay(100);

	/* Data transaction */
3100 val64 = MDIO_MMD_INDX_ADDR(addr) |
3101 MDIO_MMD_DEV_ADDR(mmd_type) |
3102 MDIO_MMS_PRT_ADDR(0x0) |
3103 MDIO_MDIO_DATA(value) |
3104 MDIO_OP(MDIO_OP_WRITE_TRANS);
3105 writeq(val64, &bar0->mdio_control);
3106 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3107 writeq(val64, &bar0->mdio_control);
3108 udelay(100);

	/* Follow the write with a read transaction (result discarded) */
3110 val64 = MDIO_MMD_INDX_ADDR(addr) |
3111 MDIO_MMD_DEV_ADDR(mmd_type) |
3112 MDIO_MMS_PRT_ADDR(0x0) |
3113 MDIO_OP(MDIO_OP_READ_TRANS);
3114 writeq(val64, &bar0->mdio_control);
3115 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3116 writeq(val64, &bar0->mdio_control);
3117 udelay(100);
3118}
3119
/**
 * s2io_mdio_read - Function to read from the MDIO registers.
 * @mmd_type: MMD type value (PMA/PMD/WIS/PCS/PHYXS).
 * @addr: address value.
 * @dev: pointer to the net_device structure.
 * Description:
 * This function is used to read values from the MDIO registers.
 */
3129static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3130{
3131 u64 val64 = 0x0;
3132 u64 rval64 = 0x0;
3133 struct s2io_nic *sp = netdev_priv(dev);
3134 struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
3137 val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3138 | MDIO_MMD_DEV_ADDR(mmd_type)
3139 | MDIO_MMS_PRT_ADDR(0x0));
3140 writeq(val64, &bar0->mdio_control);
3141 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3142 writeq(val64, &bar0->mdio_control);
3143 udelay(100);

	/* Data transaction */
3146 val64 = MDIO_MMD_INDX_ADDR(addr) |
3147 MDIO_MMD_DEV_ADDR(mmd_type) |
3148 MDIO_MMS_PRT_ADDR(0x0) |
3149 MDIO_OP(MDIO_OP_READ_TRANS);
3150 writeq(val64, &bar0->mdio_control);
3151 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3152 writeq(val64, &bar0->mdio_control);
3153 udelay(100);

	/* Read the value from the registers */
3156 rval64 = readq(&bar0->mdio_control);
3157 rval64 = rval64 & 0xFFFF0000;
3158 rval64 = rval64 >> 16;
3159 return rval64;
3160}
3161
3162
/**
 * s2io_chk_xpak_counter - Checks the status of the xpak counters.
 * @counter: counter to be updated.
 * @regs_stat: accumulated per-alarm state bits.
 * @index: index of the 2-bit field within @regs_stat.
 * @flag: whether the alarm bit is currently set.
 * @type: alarm type (1 = temperature, 2 = bias current, 3 = laser power).
 * Description:
 * Updates the xpak counter and warns the user once the same alarm has
 * been seen on three consecutive polls.
 */
3174static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3175 u16 flag, u16 type)
3176{
3177 u64 mask = 0x3;
3178 u64 val64;
3179 int i;
3180 for (i = 0; i < index; i++)
3181 mask = mask << 0x2;
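	/*
	 * Each alarm keeps a 2-bit consecutive-hit count in *regs_stat;
	 * 'mask' now selects the 2-bit field belonging to this index.
	 */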
3182
3183 if (flag > 0) {
3184 *counter = *counter + 1;
3185 val64 = *regs_stat & mask;
3186 val64 = val64 >> (index * 0x2);
3187 val64 = val64 + 1;
3188 if (val64 == 3) {
3189 switch (type) {
3190 case 1:
3191 DBG_PRINT(ERR_DBG,
3192 "Take Xframe NIC out of service.\n");
3193 DBG_PRINT(ERR_DBG,
3194"Excessive temperatures may result in premature transceiver failure.\n");
3195 break;
3196 case 2:
3197 DBG_PRINT(ERR_DBG,
3198 "Take Xframe NIC out of service.\n");
3199 DBG_PRINT(ERR_DBG,
3200"Excessive bias currents may indicate imminent laser diode failure.\n");
3201 break;
3202 case 3:
3203 DBG_PRINT(ERR_DBG,
3204 "Take Xframe NIC out of service.\n");
3205 DBG_PRINT(ERR_DBG,
3206"Excessive laser output power may saturate far-end receiver.\n");
3207 break;
3208 default:
3209 DBG_PRINT(ERR_DBG,
3210 "Incorrect XPAK Alarm type\n");
3211 }
3212 val64 = 0x0;
3213 }
3214 val64 = val64 << (index * 0x2);
3215 *regs_stat = (*regs_stat & (~mask)) | (val64);
3216
3217 } else {
3218 *regs_stat = *regs_stat & (~mask);
3219 }
3220}
3221
3222
/**
 * s2io_updt_xpak_counter - Updates the xpak counters.
 * @dev: pointer to the net_device structure.
 * Description:
 * This function updates the status of the xpak counters.
 */
3229static void s2io_updt_xpak_counter(struct net_device *dev)
3230{
3231 u16 flag = 0x0;
3232 u16 type = 0x0;
3233 u16 val16 = 0x0;
3234 u64 val64 = 0x0;
3235 u64 addr = 0x0;
3236
3237 struct s2io_nic *sp = netdev_priv(dev);
3238 struct stat_block *stats = sp->mac_control.stats_info;
3239 struct xpakStat *xstats = &stats->xpak_stat;

	/* Check the communication with the MDIO slave */
3242 addr = MDIO_CTRL1;
3243 val64 = 0x0;
3244 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3245 if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3246 DBG_PRINT(ERR_DBG,
3247 "ERR: MDIO slave access failed - Returned %llx\n",
3248 (unsigned long long)val64);
3249 return;
3250 }

	/* Check for the expected value of control reg 1 */
3253 if (val64 != MDIO_CTRL1_SPEED10G) {
3254 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3255 "Returned: %llx- Expected: 0x%x\n",
3256 (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3257 return;
3258 }

	/* Loading the DOM register into the MDIO register */
3261 addr = 0xA100;
3262 s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3263 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Reading the Alarm flags */
3266 addr = 0xA070;
3267 val64 = 0x0;
3268 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3269
3270 flag = CHECKBIT(val64, 0x7);
3271 type = 1;
3272 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3273 &xstats->xpak_regs_stat,
3274 0x0, flag, type);
3275
3276 if (CHECKBIT(val64, 0x6))
3277 xstats->alarm_transceiver_temp_low++;
3278
3279 flag = CHECKBIT(val64, 0x3);
3280 type = 2;
3281 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3282 &xstats->xpak_regs_stat,
3283 0x2, flag, type);
3284
3285 if (CHECKBIT(val64, 0x2))
3286 xstats->alarm_laser_bias_current_low++;
3287
3288 flag = CHECKBIT(val64, 0x1);
3289 type = 3;
3290 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3291 &xstats->xpak_regs_stat,
3292 0x4, flag, type);
3293
3294 if (CHECKBIT(val64, 0x0))
3295 xstats->alarm_laser_output_power_low++;

	/* Reading the Warning flags */
3298 addr = 0xA074;
3299 val64 = 0x0;
3300 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3301
3302 if (CHECKBIT(val64, 0x7))
3303 xstats->warn_transceiver_temp_high++;
3304
3305 if (CHECKBIT(val64, 0x6))
3306 xstats->warn_transceiver_temp_low++;
3307
3308 if (CHECKBIT(val64, 0x3))
3309 xstats->warn_laser_bias_current_high++;
3310
3311 if (CHECKBIT(val64, 0x2))
3312 xstats->warn_laser_bias_current_low++;
3313
3314 if (CHECKBIT(val64, 0x1))
3315 xstats->warn_laser_output_power_high++;
3316
3317 if (CHECKBIT(val64, 0x0))
3318 xstats->warn_laser_output_power_low++;
3319}
3320
/**
 * wait_for_cmd_complete - Waits for a command to complete.
 * @addr: address of the register to poll.
 * @busy_bit: bit that indicates the command is still busy.
 * @bit_state: S2IO_BIT_RESET or S2IO_BIT_SET, the state to wait for.
 * Description:
 * Waits for a command written to the RMAC ADDR/DATA registers to
 * complete, and returns either success or failure depending on whether
 * the command completed within the polling window.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */
3333static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3334 int bit_state)
3335{
3336 int ret = FAILURE, cnt = 0, delay = 1;
3337 u64 val64;
3338
3339 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3340 return FAILURE;
3341
3342 do {
3343 val64 = readq(addr);
3344 if (bit_state == S2IO_BIT_RESET) {
3345 if (!(val64 & busy_bit)) {
3346 ret = SUCCESS;
3347 break;
3348 }
3349 } else {
3350 if (val64 & busy_bit) {
3351 ret = SUCCESS;
3352 break;
3353 }
3354 }
3355
3356 if (in_interrupt())
3357 mdelay(delay);
3358 else
3359 msleep(delay);
3360
3361 if (++cnt >= 10)
3362 delay = 50;
3363 } while (cnt < 20);
3364 return ret;
3365}
3366
/**
 * check_pci_device_id - Checks if the device id is supported.
 * @id: device id.
 * Description: Checks if the PCI device id is supported by the driver.
 * Return value: the device type if supported, else PCI_ANY_ID.
 */
3372static u16 check_pci_device_id(u16 id)
3373{
3374 switch (id) {
3375 case PCI_DEVICE_ID_HERC_WIN:
3376 case PCI_DEVICE_ID_HERC_UNI:
3377 return XFRAME_II_DEVICE;
3378 case PCI_DEVICE_ID_S2IO_UNI:
3379 case PCI_DEVICE_ID_S2IO_WIN:
3380 return XFRAME_I_DEVICE;
3381 default:
3382 return PCI_ANY_ID;
3383 }
3384}
3385
3386
/**
 * s2io_reset - Resets the card.
 * @sp: private member of the device structure.
 * Description: Resets the card. The function also restores the
 * previously saved PCI configuration space registers, as the card
 * reset also resets the configuration space.
 * Return value: void.
 */
3396static void s2io_reset(struct s2io_nic *sp)
3397{
3398 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3399 u64 val64;
3400 u16 subid, pci_cmd;
3401 int i;
3402 u16 val16;
3403 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3404 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3405 struct stat_block *stats;
3406 struct swStat *swstats;
3407
3408 DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3409 __func__, pci_name(sp->pdev));

	/* Back up the PCI-X CMD reg; we don't want to lose MMRBC, OST settings */
3412 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3413
3414 val64 = SW_RESET_ALL;
3415 writeq(val64, &bar0->sw_reset);
3416 if (strstr(sp->product_name, "CX4"))
3417 msleep(750);
3418 msleep(250);
3419 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
		/*
		 * Restore the PCI configuration space saved during init.
		 */
3422 pci_restore_state(sp->pdev);
3423 pci_save_state(sp->pdev);
3424 pci_read_config_word(sp->pdev, 0x2, &val16);
3425 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3426 break;
3427 msleep(200);
3428 }
3429
3430 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3431 DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3432
3433 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3434
3435 s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
3438 s2io_set_swapper(sp);

	/* restore the previously assigned mac address entries */
3441 do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
3444 restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
3447 if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear the "detected parity error" bit */
3449 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX ECC status register */
3452 pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
3455 writeq(s2BIT(62), &bar0->txpic_int_reg);
3456 }

	/* Reset device statistics maintained by OS */
3459 memset(&sp->stats, 0, sizeof(struct net_device_stats));
3460
3461 stats = sp->mac_control.stats_info;
3462 swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3465 up_cnt = swstats->link_up_cnt;
3466 down_cnt = swstats->link_down_cnt;
3467 up_time = swstats->link_up_time;
3468 down_time = swstats->link_down_time;
3469 reset_cnt = swstats->soft_reset_cnt;
3470 mem_alloc_cnt = swstats->mem_allocated;
3471 mem_free_cnt = swstats->mem_freed;
3472 watchdog_cnt = swstats->watchdog_timer_cnt;
3473
3474 memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3477 swstats->link_up_cnt = up_cnt;
3478 swstats->link_down_cnt = down_cnt;
3479 swstats->link_up_time = up_time;
3480 swstats->link_down_time = down_time;
3481 swstats->soft_reset_cnt = reset_cnt;
3482 swstats->mem_allocated = mem_alloc_cnt;
3483 swstats->mem_freed = mem_free_cnt;
3484 swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
3487 subid = sp->pdev->subsystem_device;
3488 if (((subid & 0xFF) >= 0x07) &&
3489 (sp->device_type == XFRAME_I_DEVICE)) {
3490 val64 = readq(&bar0->gpio_control);
3491 val64 |= 0x0000800000000000ULL;
3492 writeq(val64, &bar0->gpio_control);
3493 val64 = 0x0411040400000000ULL;
3494 writeq(val64, (void __iomem *)bar0 + 0x2700);
3495 }
	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * Xframe II cards after reset.
	 */
3501 if (sp->device_type == XFRAME_II_DEVICE) {
3502 val64 = readq(&bar0->pcc_err_reg);
3503 writeq(val64, &bar0->pcc_err_reg);
3504 }
3505
3506 sp->device_enabled_once = false;
3507}
3508
3509
/**
 * s2io_set_swapper - swapper settings.
 * @sp: private member of the device structure, a pointer to the
 * s2io_nic structure.
 * Description: Sets the swapper control on the card correctly,
 * depending on the 'endianness' of the system.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */
3519static int s2io_set_swapper(struct s2io_nic *sp)
3520{
3521 struct net_device *dev = sp->dev;
3522 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3523 u64 val64, valt, valr;
3524
	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF feedback register.
	 */
3530 val64 = readq(&bar0->pif_rd_swapper_fb);
3531 if (val64 != 0x0123456789ABCDEFULL) {
3532 int i = 0;
3533 static const u64 value[] = {
3534 0xC30000C3C30000C3ULL,
3535 0x8100008181000081ULL,
3536 0x4200004242000042ULL,
3537 0
3538 };

		/* Try each candidate swapper setting until the feedback register reads back the magic value */
3540 while (i < 4) {
3541 writeq(value[i], &bar0->swapper_ctrl);
3542 val64 = readq(&bar0->pif_rd_swapper_fb);
3543 if (val64 == 0x0123456789ABCDEFULL)
3544 break;
3545 i++;
3546 }
3547 if (i == 4) {
3548 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3549 "feedback read %llx\n",
3550 dev->name, (unsigned long long)val64);
3551 return FAILURE;
3552 }
3553 valr = value[i];
3554 } else {
3555 valr = readq(&bar0->swapper_ctrl);
3556 }
3557
3558 valt = 0x0123456789ABCDEFULL;
3559 writeq(valt, &bar0->xmsi_address);
3560 val64 = readq(&bar0->xmsi_address);
3561
3562 if (val64 != valt) {
3563 int i = 0;
3564 static const u64 value[] = {
3565 0x00C3C30000C3C300ULL,
3566 0x0081810000818100ULL,
3567 0x0042420000424200ULL,
3568 0
3569 };
3570
3571 while (i < 4) {
3572 writeq((value[i] | valr), &bar0->swapper_ctrl);
3573 writeq(valt, &bar0->xmsi_address);
3574 val64 = readq(&bar0->xmsi_address);
3575 if (val64 == valt)
3576 break;
3577 i++;
3578 }
3579 if (i == 4) {
3580 unsigned long long x = val64;
3581 DBG_PRINT(ERR_DBG,
3582 "Write failed, Xmsi_addr reads:0x%llx\n", x);
3583 return FAILURE;
3584 }
3585 }
3586 val64 = readq(&bar0->swapper_ctrl);
3587 val64 &= 0xFFFF000000000000ULL;
3588
3589#ifdef __BIG_ENDIAN
	/*
	 * The device, by default, is set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
3594 val64 |= (SWAPPER_CTRL_TXP_FE |
3595 SWAPPER_CTRL_TXP_SE |
3596 SWAPPER_CTRL_TXD_R_FE |
3597 SWAPPER_CTRL_TXD_W_FE |
3598 SWAPPER_CTRL_TXF_R_FE |
3599 SWAPPER_CTRL_RXD_R_FE |
3600 SWAPPER_CTRL_RXD_W_FE |
3601 SWAPPER_CTRL_RXF_W_FE |
3602 SWAPPER_CTRL_XMSI_FE |
3603 SWAPPER_CTRL_STATS_FE |
3604 SWAPPER_CTRL_STATS_SE);
3605 if (sp->config.intr_type == INTA)
3606 val64 |= SWAPPER_CTRL_XMSI_SE;
3607 writeq(val64, &bar0->swapper_ctrl);
3608#else
	/*
	 * Initially we enable all bits to make the register accessible
	 * by the driver, then we selectively enable only those bits
	 * that we want to set.
	 */
3614 val64 |= (SWAPPER_CTRL_TXP_FE |
3615 SWAPPER_CTRL_TXP_SE |
3616 SWAPPER_CTRL_TXD_R_FE |
3617 SWAPPER_CTRL_TXD_R_SE |
3618 SWAPPER_CTRL_TXD_W_FE |
3619 SWAPPER_CTRL_TXD_W_SE |
3620 SWAPPER_CTRL_TXF_R_FE |
3621 SWAPPER_CTRL_RXD_R_FE |
3622 SWAPPER_CTRL_RXD_R_SE |
3623 SWAPPER_CTRL_RXD_W_FE |
3624 SWAPPER_CTRL_RXD_W_SE |
3625 SWAPPER_CTRL_RXF_W_FE |
3626 SWAPPER_CTRL_XMSI_FE |
3627 SWAPPER_CTRL_STATS_FE |
3628 SWAPPER_CTRL_STATS_SE);
3629 if (sp->config.intr_type == INTA)
3630 val64 |= SWAPPER_CTRL_XMSI_SE;
3631 writeq(val64, &bar0->swapper_ctrl);
3632#endif
3633 val64 = readq(&bar0->swapper_ctrl);
	/*
	 * Verifying that the endian settings are accurate by reading
	 * the feedback register once more.
	 */
3639 val64 = readq(&bar0->pif_rd_swapper_fb);
3640 if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect */
3642 DBG_PRINT(ERR_DBG,
3643 "%s: Endian settings are wrong, feedback read %llx\n",
3644 dev->name, (unsigned long long)val64);
3645 return FAILURE;
3646 }
3647
3648 return SUCCESS;
3649}
3650
3651static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3652{
3653 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3654 u64 val64;
3655 int ret = 0, cnt = 0;
3656
3657 do {
3658 val64 = readq(&bar0->xmsi_access);
3659 if (!(val64 & s2BIT(15)))
3660 break;
3661 mdelay(1);
3662 cnt++;
3663 } while (cnt < 5);
3664 if (cnt == 5) {
3665 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3666 ret = 1;
3667 }
3668
3669 return ret;
3670}
3671
3672static void restore_xmsi_data(struct s2io_nic *nic)
3673{
3674 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3675 u64 val64;
3676 int i, msix_index;
3677
3678 if (nic->device_type == XFRAME_I_DEVICE)
3679 return;
3680
3681 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3682 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
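		/*
		 * Vector 0 is the alarm vector; ring vectors sit at every
		 * eighth slot starting from 1, mirroring the layout set up
		 * in s2io_enable_msi_x().
		 */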
3683 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3684 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3685 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3686 writeq(val64, &bar0->xmsi_access);
3687 if (wait_for_msix_trans(nic, msix_index))
3688 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3689 __func__, msix_index);
3690 }
3691}
3692
3693static void store_xmsi_data(struct s2io_nic *nic)
3694{
3695 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3696 u64 val64, addr, data;
3697 int i, msix_index;
3698
3699 if (nic->device_type == XFRAME_I_DEVICE)
3700 return;
3701
	/* Store and display */
3703 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3704 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3705 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3706 writeq(val64, &bar0->xmsi_access);
3707 if (wait_for_msix_trans(nic, msix_index)) {
3708 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3709 __func__, msix_index);
3710 continue;
3711 }
3712 addr = readq(&bar0->xmsi_address);
3713 data = readq(&bar0->xmsi_data);
3714 if (addr && data) {
3715 nic->msix_info[i].addr = addr;
3716 nic->msix_info[i].data = data;
3717 }
3718 }
3719}
3720
3721static int s2io_enable_msi_x(struct s2io_nic *nic)
3722{
3723 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3724 u64 rx_mat;
3725 u16 msi_control;
3726 int ret, i, j, msix_indx = 1;
3727 int size;
3728 struct stat_block *stats = nic->mac_control.stats_info;
3729 struct swStat *swstats = &stats->sw_stat;
3730
3731 size = nic->num_entries * sizeof(struct msix_entry);
3732 nic->entries = kzalloc(size, GFP_KERNEL);
3733 if (!nic->entries) {
3734 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3735 __func__);
3736 swstats->mem_alloc_fail_cnt++;
3737 return -ENOMEM;
3738 }
3739 swstats->mem_allocated += size;
3740
3741 size = nic->num_entries * sizeof(struct s2io_msix_entry);
3742 nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3743 if (!nic->s2io_entries) {
3744 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3745 __func__);
3746 swstats->mem_alloc_fail_cnt++;
3747 kfree(nic->entries);
3748 swstats->mem_freed
3749 += (nic->num_entries * sizeof(struct msix_entry));
3750 return -ENOMEM;
3751 }
3752 swstats->mem_allocated += size;
3753
3754 nic->entries[0].entry = 0;
3755 nic->s2io_entries[0].entry = 0;
3756 nic->s2io_entries[0].in_use = MSIX_FLG;
3757 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3758 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3759
3760 for (i = 1; i < nic->num_entries; i++) {
3761 nic->entries[i].entry = ((i - 1) * 8) + 1;
3762 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3763 nic->s2io_entries[i].arg = NULL;
3764 nic->s2io_entries[i].in_use = 0;
3765 }
3766
3767 rx_mat = readq(&bar0->rx_mat);
3768 for (j = 0; j < nic->config.rx_ring_num; j++) {
3769 rx_mat |= RX_MAT_SET(j, msix_indx);
3770 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3771 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3772 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3773 msix_indx += 8;
3774 }
3775 writeq(rx_mat, &bar0->rx_mat);
3776 readq(&bar0->rx_mat);
3777
3778 ret = pci_enable_msix_range(nic->pdev, nic->entries,
3779 nic->num_entries, nic->num_entries);
3780
3781 if (ret < 0) {
3782 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3783 kfree(nic->entries);
3784 swstats->mem_freed += nic->num_entries *
3785 sizeof(struct msix_entry);
3786 kfree(nic->s2io_entries);
3787 swstats->mem_freed += nic->num_entries *
3788 sizeof(struct s2io_msix_entry);
3789 nic->entries = NULL;
3790 nic->s2io_entries = NULL;
3791 return -ENOMEM;
3792 }

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC.
	 */
3798 pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1;	/* enable MSI */
3800 pci_write_config_word(nic->pdev, 0x42, msi_control);
3801
3802 return 0;
3803}

/* Handle the software interrupt used during the MSI(X) test */
3806static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3807{
3808 struct s2io_nic *sp = dev_id;
3809
3810 sp->msi_detected = 1;
3811 wake_up(&sp->msi_wait);
3812
3813 return IRQ_HANDLED;
3814}

/* Test the interrupt path by forcing a software IRQ */
3817static int s2io_test_msi(struct s2io_nic *sp)
3818{
3819 struct pci_dev *pdev = sp->pdev;
3820 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3821 int err;
3822 u64 val64, saved64;
3823
3824 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3825 sp->name, sp);
3826 if (err) {
3827 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3828 sp->dev->name, pci_name(pdev), pdev->irq);
3829 return err;
3830 }
3831
3832 init_waitqueue_head(&sp->msi_wait);
3833 sp->msi_detected = 0;
3834
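	/*
	 * Fire a one-shot scheduled interrupt routed to MSI-X vector 1,
	 * then wait briefly for the test handler to observe it.
	 */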
3835 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3836 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3837 val64 |= SCHED_INT_CTRL_TIMER_EN;
3838 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3839 writeq(val64, &bar0->scheduled_int_ctrl);
3840
3841 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3842
3843 if (!sp->msi_detected) {
		/* The MSI(X) test failed; the caller falls back to INTA */
3845 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3846 "using MSI(X) during test\n",
3847 sp->dev->name, pci_name(pdev));
3848
3849 err = -EOPNOTSUPP;
3850 }
3851
3852 free_irq(sp->entries[1].vector, sp);
3853
3854 writeq(saved64, &bar0->scheduled_int_ctrl);
3855
3856 return err;
3857}
3858
3859static void remove_msix_isr(struct s2io_nic *sp)
3860{
3861 int i;
3862 u16 msi_control;
3863
3864 for (i = 0; i < sp->num_entries; i++) {
3865 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3866 int vector = sp->entries[i].vector;
3867 void *arg = sp->s2io_entries[i].arg;
3868 free_irq(vector, arg);
3869 }
3870 }
3871
3872 kfree(sp->entries);
3873 kfree(sp->s2io_entries);
3874 sp->entries = NULL;
3875 sp->s2io_entries = NULL;
3876
3877 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3878 msi_control &= 0xFFFE;
3879 pci_write_config_word(sp->pdev, 0x42, msi_control);
3880
3881 pci_disable_msix(sp->pdev);
3882}
3883
3884static void remove_inta_isr(struct s2io_nic *sp)
3885{
3886 free_irq(sp->pdev->irq, sp->dev);
3887}
3888
3889
/**
 * s2io_open - open entry point of the driver.
 * @dev: pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver. It mainly calls
 * a function to bring the device up, programs the unicast MAC address
 * and starts the transmit queues.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * on failure.
 */
3905static int s2io_open(struct net_device *dev)
3906{
3907 struct s2io_nic *sp = netdev_priv(dev);
3908 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3909 int err = 0;
3910
	/*
	 * Make sure you have link off by default every time
	 * the NIC is initialized.
	 */
3915 netif_carrier_off(dev);
3916 sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
3919 err = s2io_card_up(sp);
3920 if (err) {
3921 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3922 dev->name);
3923 goto hw_init_failed;
3924 }
3925
3926 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3927 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3928 s2io_card_down(sp);
3929 err = -ENODEV;
3930 goto hw_init_failed;
3931 }
3932 s2io_start_all_tx_queue(sp);
3933 return 0;
3934
3935hw_init_failed:
3936 if (sp->config.intr_type == MSI_X) {
3937 if (sp->entries) {
3938 kfree(sp->entries);
3939 swstats->mem_freed += sp->num_entries *
3940 sizeof(struct msix_entry);
3941 }
3942 if (sp->s2io_entries) {
3943 kfree(sp->s2io_entries);
3944 swstats->mem_freed += sp->num_entries *
3945 sizeof(struct s2io_msix_entry);
3946 }
3947 }
3948 return err;
3949}
3950
/**
 * s2io_close - close entry point of the driver.
 * @dev: device pointer.
 * Description:
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred
 * to as the close function. Among other things, this function mainly
 * stops the Rx side of the NIC and frees all the Rx buffers in the Rx
 * rings.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * on failure.
 */
3964static int s2io_close(struct net_device *dev)
3965{
3966 struct s2io_nic *sp = netdev_priv(dev);
3967 struct config_param *config = &sp->config;
3968 u64 tmp64;
3969 int offset;
3970
	/*
	 * Return if the device is already closed. This can happen when
	 * s2io_card_up() failed in change_mtu().
	 */
3974 if (!is_s2io_card_up(sp))
3975 return 0;
3976
3977 s2io_stop_all_tx_queue(sp);

	/* delete all populated mac entries */
3979 for (offset = 1; offset < config->max_mc_addr; offset++) {
3980 tmp64 = do_s2io_read_unicast_mc(sp, offset);
3981 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3982 do_s2io_delete_unicast_mc(sp, tmp64);
3983 }
3984
3985 s2io_card_down(sp);
3986
3987 return 0;
3988}
3989
3990
/**
 * s2io_xmit - Tx entry point of the driver.
 * @skb: the socket buffer containing the Tx data.
 * @dev: device pointer.
 * Description:
 * This function is the Tx entry point of the driver. The S2IO NIC
 * supports certain protocol assist features on the Tx side, namely
 * CSO, S/G and LSO.
 * Return value: NETDEV_TX_OK on success, NETDEV_TX_BUSY if the fifo
 * queue was stopped.
 */
4003static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4004{
4005 struct s2io_nic *sp = netdev_priv(dev);
4006 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4007 register u64 val64;
4008 struct TxD *txdp;
4009 struct TxFIFO_element __iomem *tx_fifo;
4010 unsigned long flags = 0;
4011 u16 vlan_tag = 0;
4012 struct fifo_info *fifo = NULL;
4013 int offload_type;
4014 int enable_per_list_interrupt = 0;
4015 struct config_param *config = &sp->config;
4016 struct mac_info *mac_control = &sp->mac_control;
4017 struct stat_block *stats = mac_control->stats_info;
4018 struct swStat *swstats = &stats->sw_stat;
4019
4020 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4021
4022 if (unlikely(skb->len <= 0)) {
4023 DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4024 dev_kfree_skb_any(skb);
4025 return NETDEV_TX_OK;
4026 }
4027
4028 if (!is_s2io_card_up(sp)) {
4029 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4030 dev->name);
4031 dev_kfree_skb_any(skb);
4032 return NETDEV_TX_OK;
4033 }
4034
4035 queue = 0;
4036 if (skb_vlan_tag_present(skb))
4037 vlan_tag = skb_vlan_tag_get(skb);
4038 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4039 if (skb->protocol == htons(ETH_P_IP)) {
4040 struct iphdr *ip;
4041 struct tcphdr *th;
4042 ip = ip_hdr(skb);
4043
4044 if (!ip_is_fragment(ip)) {
4045 th = (struct tcphdr *)(((unsigned char *)ip) +
4046 ip->ihl*4);
4047
4048 if (ip->protocol == IPPROTO_TCP) {
4049 queue_len = sp->total_tcp_fifos;
4050 queue = (ntohs(th->source) +
4051 ntohs(th->dest)) &
4052 sp->fifo_selector[queue_len - 1];
4053 if (queue >= queue_len)
4054 queue = queue_len - 1;
4055 } else if (ip->protocol == IPPROTO_UDP) {
4056 queue_len = sp->total_udp_fifos;
4057 queue = (ntohs(th->source) +
4058 ntohs(th->dest)) &
4059 sp->fifo_selector[queue_len - 1];
4060 if (queue >= queue_len)
4061 queue = queue_len - 1;
4062 queue += sp->udp_fifo_idx;
4063 if (skb->len > 1024)
4064 enable_per_list_interrupt = 1;
4065 }
4066 }
4067 }
4068 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
4070 queue = config->fifo_mapping
4071 [skb->priority & (MAX_TX_FIFOS - 1)];
4072 fifo = &mac_control->fifos[queue];
4073
4074 spin_lock_irqsave(&fifo->tx_lock, flags);
4075
4076 if (sp->config.multiq) {
4077 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4078 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4079 return NETDEV_TX_BUSY;
4080 }
4081 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4082 if (netif_queue_stopped(dev)) {
4083 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4084 return NETDEV_TX_BUSY;
4085 }
4086 }
4087
4088 put_off = (u16)fifo->tx_curr_put_info.offset;
4089 get_off = (u16)fifo->tx_curr_get_info.offset;
4090 txdp = fifo->list_info[put_off].list_virt_addr;
4091
4092 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid the "put" pointer going beyond the "get" pointer */
4094 if (txdp->Host_Control ||
4095 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4096 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4097 s2io_stop_tx_queue(sp, fifo->fifo_no);
4098 dev_kfree_skb_any(skb);
4099 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4100 return NETDEV_TX_OK;
4101 }
4102
4103 offload_type = s2io_offload_type(skb);
4104 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4105 txdp->Control_1 |= TXD_TCP_LSO_EN;
4106 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4107 }
4108 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4109 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4110 TXD_TX_CKO_TCP_EN |
4111 TXD_TX_CKO_UDP_EN);
4112 }
4113 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4114 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4115 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
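	/*
	 * For large UDP flows, request a per-list Tx completion interrupt
	 * on a subset of descriptors (whenever put_off has the
	 * queue_len/32 bit set) to bound completion latency.
	 */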
4116 if (enable_per_list_interrupt)
4117 if (put_off & (queue_len >> 5))
4118 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4119 if (vlan_tag) {
4120 txdp->Control_2 |= TXD_VLAN_ENABLE;
4121 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4122 }
4123
4124 frg_len = skb_headlen(skb);
4125 txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
4126 frg_len, DMA_TO_DEVICE);
4127 if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
4128 goto pci_map_failed;
4129
4130 txdp->Host_Control = (unsigned long)skb;
4131 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4132
4133 frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKBs */
4135 for (i = 0; i < frg_cnt; i++) {
4136 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4137
4138 if (!skb_frag_size(frag))
4139 continue;
4140 txdp++;
4141 txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4142 frag, 0,
4143 skb_frag_size(frag),
4144 DMA_TO_DEVICE);
4145 txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4146 }
4147 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4148
4149 tx_fifo = mac_control->tx_FIFO_start[queue];
4150 val64 = fifo->list_info[put_off].list_phy_addr;
4151 writeq(val64, &tx_fifo->TxDL_Pointer);
4152
4153 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4154 TX_FIFO_LAST_LIST);
4155 if (offload_type)
4156 val64 |= TX_FIFO_SPECIAL_FUNC;
4157
4158 writeq(val64, &tx_fifo->List_Control);
4159
4160 put_off++;
4161 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4162 put_off = 0;
4163 fifo->tx_curr_put_info.offset = put_off;

	/* Avoid the "put" pointer going beyond the "get" pointer */
4166 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4167 swstats->fifo_full_cnt++;
4168 DBG_PRINT(TX_DBG,
4169 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4170 put_off, get_off);
4171 s2io_stop_tx_queue(sp, fifo->fifo_no);
4172 }
4173 swstats->mem_allocated += skb->truesize;
4174 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4175
4176 if (sp->config.intr_type == MSI_X)
4177 tx_intr_handler(fifo);
4178
4179 return NETDEV_TX_OK;
4180
4181pci_map_failed:
4182 swstats->pci_map_fail_cnt++;
4183 s2io_stop_tx_queue(sp, fifo->fifo_no);
4184 swstats->mem_freed += skb->truesize;
4185 dev_kfree_skb_any(skb);
4186 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4187 return NETDEV_TX_OK;
4188}
4189
4190static void
4191s2io_alarm_handle(struct timer_list *t)
4192{
4193 struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4194 struct net_device *dev = sp->dev;
4195
4196 s2io_handle_errors(dev);
4197 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4198}
4199
4200static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4201{
4202 struct ring_info *ring = (struct ring_info *)dev_id;
4203 struct s2io_nic *sp = ring->nic;
4204 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4205
4206 if (unlikely(!is_s2io_card_up(sp)))
4207 return IRQ_HANDLED;
4208
4209 if (sp->config.napi) {
4210 u8 __iomem *addr = NULL;
4211 u8 val8 = 0;
4212
4213 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4214 addr += (7 - ring->ring_no);
4215 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4216 writeb(val8, addr);
4217 val8 = readb(addr);
4218 napi_schedule(&ring->napi);
4219 } else {
4220 rx_intr_handler(ring, 0);
4221 s2io_chk_rx_buffers(sp, ring);
4222 }
4223
4224 return IRQ_HANDLED;
4225}
4226
4227static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4228{
4229 int i;
4230 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4231 struct s2io_nic *sp = fifos->nic;
4232 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4233 struct config_param *config = &sp->config;
4234 u64 reason;
4235
4236 if (unlikely(!is_s2io_card_up(sp)))
4237 return IRQ_NONE;
4238
4239 reason = readq(&bar0->general_int_status);
4240 if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
4242 return IRQ_HANDLED;
4243
4244 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4245 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4246
4247 if (reason & GEN_INTR_TXPIC)
4248 s2io_txpic_intr_handle(sp);
4249
4250 if (reason & GEN_INTR_TXTRAFFIC)
4251 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4252
4253 for (i = 0; i < config->tx_fifo_num; i++)
4254 tx_intr_handler(&fifos[i]);
4255
4256 writeq(sp->general_int_mask, &bar0->general_int_mask);
4257 readl(&bar0->general_int_status);
4258 return IRQ_HANDLED;
4259 }
4260
4261 return IRQ_NONE;
4262}
4263
4264static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4265{
4266 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4267 u64 val64;
4268
4269 val64 = readq(&bar0->pic_int_status);
4270 if (val64 & PIC_INT_GPIO) {
4271 val64 = readq(&bar0->gpio_int_reg);
4272 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4273 (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is an unstable state: clear both the link-up
			 * and link-down interrupts and unmask them so the
			 * adapter can re-evaluate the link state.
			 */
4278 val64 |= GPIO_INT_REG_LINK_DOWN;
4279 val64 |= GPIO_INT_REG_LINK_UP;
4280 writeq(val64, &bar0->gpio_int_reg);
4281 val64 = readq(&bar0->gpio_int_mask);
4282 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4283 GPIO_INT_MASK_LINK_DOWN);
4284 writeq(val64, &bar0->gpio_int_mask);
4285 } else if (val64 & GPIO_INT_REG_LINK_UP) {
4286 val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
4288 val64 = readq(&bar0->adapter_control);
4289 val64 |= ADAPTER_CNTL_EN;
4290 writeq(val64, &bar0->adapter_control);
4291 val64 |= ADAPTER_LED_ON;
4292 writeq(val64, &bar0->adapter_control);
4293 if (!sp->device_enabled_once)
4294 sp->device_enabled_once = 1;
4295
4296 s2io_link(sp, LINK_UP);

			/*
			 * Unmask the link-down interrupt and mask the
			 * link-up interrupt.
			 */
4301 val64 = readq(&bar0->gpio_int_mask);
4302 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4303 val64 |= GPIO_INT_MASK_LINK_UP;
4304 writeq(val64, &bar0->gpio_int_mask);
4305
4306 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4307 val64 = readq(&bar0->adapter_status);
4308 s2io_link(sp, LINK_DOWN);
			/* Link is down, so unmask the link-up interrupt */
4310 val64 = readq(&bar0->gpio_int_mask);
4311 val64 &= ~GPIO_INT_MASK_LINK_UP;
4312 val64 |= GPIO_INT_MASK_LINK_DOWN;
4313 writeq(val64, &bar0->gpio_int_mask);

			/* Turn off the LED */
4316 val64 = readq(&bar0->adapter_control);
4317 val64 = val64 & (~ADAPTER_LED_ON);
4318 writeq(val64, &bar0->adapter_control);
4319 }
4320 }
4321 val64 = readq(&bar0->gpio_int_mask);
4322}
4323
4324
/**
 * do_s2io_chk_alarm_bit - Check for an alarm and increment the counter.
 * @value: alarm bits to check.
 * @addr: address of the alarm register.
 * @cnt: counter to increment when an alarm bit is set.
 * Description: Checks for an alarm, clears it and increments the counter.
 * Return Value:
 * 1 - if an alarm bit was set
 * 0 - if no alarm bit was set
 */
4334static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4335 unsigned long long *cnt)
4336{
4337 u64 val64;
4338 val64 = readq(addr);
4339 if (val64 & value) {
4340 writeq(val64, addr);
4341 (*cnt)++;
4342 return 1;
4343 }
4344 return 0;
4345
4346}
4347
4348
/**
 * s2io_handle_errors - Xframe error indication handler.
 * @dev_id: opaque handle to the device structure.
 * Description: Handles alarms such as loss of link, single or double
 * ECC errors, and critical and serious errors.
 * Return Value: NONE.
 */
4356static void s2io_handle_errors(void *dev_id)
4357{
4358 struct net_device *dev = (struct net_device *)dev_id;
4359 struct s2io_nic *sp = netdev_priv(dev);
4360 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4361 u64 temp64 = 0, val64 = 0;
4362 int i = 0;
4363
4364 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4365 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4366
4367 if (!is_s2io_card_up(sp))
4368 return;
4369
4370 if (pci_channel_offline(sp->pdev))
4371 return;
4372
4373 memset(&sw_stat->ring_full_cnt, 0,
4374 sizeof(sw_stat->ring_full_cnt));

	/* Handling the XPAK counters update */
4377 if (stats->xpak_timer_count < 72000) {
		/* still waiting; just advance the poll counter */
4379 stats->xpak_timer_count++;
4380 } else {
4381 s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
4383 stats->xpak_timer_count = 0;
4384 }

	/* Handling link status change error interrupt */
4387 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4388 val64 = readq(&bar0->mac_rmac_err_reg);
4389 writeq(val64, &bar0->mac_rmac_err_reg);
4390 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4391 schedule_work(&sp->set_link_task);
4392 }

	/* In case of a serious error, the device will be reset. */
4395 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4396 &sw_stat->serious_err_cnt))
4397 goto reset;

	/* Check for data parity error */
4400 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4401 &sw_stat->parity_err_cnt))
4402 goto reset;

	/* Check for ring full counter */
4405 if (sp->device_type == XFRAME_II_DEVICE) {
4406 val64 = readq(&bar0->ring_bump_counter1);
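		/*
		 * Each 64-bit ring bump counter packs four 16-bit per-ring
		 * "ring full" counts; unpack and accumulate them.
		 */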
4407 for (i = 0; i < 4; i++) {
4408 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4409 temp64 >>= 64 - ((i+1)*16);
4410 sw_stat->ring_full_cnt[i] += temp64;
4411 }
4412
4413 val64 = readq(&bar0->ring_bump_counter2);
4414 for (i = 0; i < 4; i++) {
4415 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4416 temp64 >>= 64 - ((i+1)*16);
4417 sw_stat->ring_full_cnt[i+4] += temp64;
4418 }
4419 }
4420
4421 val64 = readq(&bar0->txdma_int_status);
	/* check for pfc_err */
4423 if (val64 & TXDMA_PFC_INT) {
4424 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4425 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4426 PFC_PCIX_ERR,
4427 &bar0->pfc_err_reg,
4428 &sw_stat->pfc_err_cnt))
4429 goto reset;
4430 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4431 &bar0->pfc_err_reg,
4432 &sw_stat->pfc_err_cnt);
4433 }

	/* check for tda_err */
4436 if (val64 & TXDMA_TDA_INT) {
4437 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4438 TDA_SM0_ERR_ALARM |
4439 TDA_SM1_ERR_ALARM,
4440 &bar0->tda_err_reg,
4441 &sw_stat->tda_err_cnt))
4442 goto reset;
4443 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4444 &bar0->tda_err_reg,
4445 &sw_stat->tda_err_cnt);
4446 }
	/* check for pcc_err */
4448 if (val64 & TXDMA_PCC_INT) {
4449 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4450 PCC_N_SERR | PCC_6_COF_OV_ERR |
4451 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4452 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4453 PCC_TXB_ECC_DB_ERR,
4454 &bar0->pcc_err_reg,
4455 &sw_stat->pcc_err_cnt))
4456 goto reset;
4457 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4458 &bar0->pcc_err_reg,
4459 &sw_stat->pcc_err_cnt);
4460 }
4461
4462	/* check for tti_err */
4463 if (val64 & TXDMA_TTI_INT) {
4464 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4465 &bar0->tti_err_reg,
4466 &sw_stat->tti_err_cnt))
4467 goto reset;
4468 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4469 &bar0->tti_err_reg,
4470 &sw_stat->tti_err_cnt);
4471 }
4472
4473	/* check for lso_err */
4474 if (val64 & TXDMA_LSO_INT) {
4475 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4476 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4477 &bar0->lso_err_reg,
4478 &sw_stat->lso_err_cnt))
4479 goto reset;
4480 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4481 &bar0->lso_err_reg,
4482 &sw_stat->lso_err_cnt);
4483 }
4484
4485	/* check for tpa_err */
4486 if (val64 & TXDMA_TPA_INT) {
4487 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4488 &bar0->tpa_err_reg,
4489 &sw_stat->tpa_err_cnt))
4490 goto reset;
4491 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4492 &bar0->tpa_err_reg,
4493 &sw_stat->tpa_err_cnt);
4494 }
4495
4496	/* check for sm_err */
4497 if (val64 & TXDMA_SM_INT) {
4498 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4499 &bar0->sm_err_reg,
4500 &sw_stat->sm_err_cnt))
4501 goto reset;
4502 }
4503
4504 val64 = readq(&bar0->mac_int_status);
4505 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4506 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4507 &bar0->mac_tmac_err_reg,
4508 &sw_stat->mac_tmac_err_cnt))
4509 goto reset;
4510 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4511 TMAC_DESC_ECC_SG_ERR |
4512 TMAC_DESC_ECC_DB_ERR,
4513 &bar0->mac_tmac_err_reg,
4514 &sw_stat->mac_tmac_err_cnt);
4515 }
4516
4517 val64 = readq(&bar0->xgxs_int_status);
4518 if (val64 & XGXS_INT_STATUS_TXGXS) {
4519 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4520 &bar0->xgxs_txgxs_err_reg,
4521 &sw_stat->xgxs_txgxs_err_cnt))
4522 goto reset;
4523 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4524 &bar0->xgxs_txgxs_err_reg,
4525 &sw_stat->xgxs_txgxs_err_cnt);
4526 }
4527
4528 val64 = readq(&bar0->rxdma_int_status);
4529 if (val64 & RXDMA_INT_RC_INT_M) {
4530 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4531 RC_FTC_ECC_DB_ERR |
4532 RC_PRCn_SM_ERR_ALARM |
4533 RC_FTC_SM_ERR_ALARM,
4534 &bar0->rc_err_reg,
4535 &sw_stat->rc_err_cnt))
4536 goto reset;
4537 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4538 RC_FTC_ECC_SG_ERR |
4539 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4540 &sw_stat->rc_err_cnt);
4541 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4542 PRC_PCI_AB_WR_Rn |
4543 PRC_PCI_AB_F_WR_Rn,
4544 &bar0->prc_pcix_err_reg,
4545 &sw_stat->prc_pcix_err_cnt))
4546 goto reset;
4547 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4548 PRC_PCI_DP_WR_Rn |
4549 PRC_PCI_DP_F_WR_Rn,
4550 &bar0->prc_pcix_err_reg,
4551 &sw_stat->prc_pcix_err_cnt);
4552 }
4553
4554 if (val64 & RXDMA_INT_RPA_INT_M) {
4555 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4556 &bar0->rpa_err_reg,
4557 &sw_stat->rpa_err_cnt))
4558 goto reset;
4559 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4560 &bar0->rpa_err_reg,
4561 &sw_stat->rpa_err_cnt);
4562 }
4563
4564 if (val64 & RXDMA_INT_RDA_INT_M) {
4565 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4566 RDA_FRM_ECC_DB_N_AERR |
4567 RDA_SM1_ERR_ALARM |
4568 RDA_SM0_ERR_ALARM |
4569 RDA_RXD_ECC_DB_SERR,
4570 &bar0->rda_err_reg,
4571 &sw_stat->rda_err_cnt))
4572 goto reset;
4573 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4574 RDA_FRM_ECC_SG_ERR |
4575 RDA_MISC_ERR |
4576 RDA_PCIX_ERR,
4577 &bar0->rda_err_reg,
4578 &sw_stat->rda_err_cnt);
4579 }
4580
4581 if (val64 & RXDMA_INT_RTI_INT_M) {
4582 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4583 &bar0->rti_err_reg,
4584 &sw_stat->rti_err_cnt))
4585 goto reset;
4586 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4587 &bar0->rti_err_reg,
4588 &sw_stat->rti_err_cnt);
4589 }
4590
4591 val64 = readq(&bar0->mac_int_status);
4592 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4593 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4594 &bar0->mac_rmac_err_reg,
4595 &sw_stat->mac_rmac_err_cnt))
4596 goto reset;
4597 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4598 RMAC_SINGLE_ECC_ERR |
4599 RMAC_DOUBLE_ECC_ERR,
4600 &bar0->mac_rmac_err_reg,
4601 &sw_stat->mac_rmac_err_cnt);
4602 }
4603
4604 val64 = readq(&bar0->xgxs_int_status);
4605 if (val64 & XGXS_INT_STATUS_RXGXS) {
4606 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4607 &bar0->xgxs_rxgxs_err_reg,
4608 &sw_stat->xgxs_rxgxs_err_cnt))
4609 goto reset;
4610 }
4611
4612 val64 = readq(&bar0->mc_int_status);
4613 if (val64 & MC_INT_STATUS_MC_INT) {
4614 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4615 &bar0->mc_err_reg,
4616 &sw_stat->mc_err_cnt))
4617 goto reset;
4618
4619		/* Handling Ecc errors */
4620 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4621 writeq(val64, &bar0->mc_err_reg);
4622 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4623 sw_stat->double_ecc_errs++;
4624 if (sp->device_type != XFRAME_II_DEVICE) {
4625				/*
4626				 * Reset XframeI only if critical error
4627				 */
4628 if (val64 &
4629 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4630 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4631 goto reset;
4632 }
4633 } else
4634 sw_stat->single_ecc_errs++;
4635 }
4636 }
4637 return;
4638
4639reset:
4640 s2io_stop_all_tx_queue(sp);
4641 schedule_work(&sp->rst_timer_task);
4642 sw_stat->soft_reset_cnt++;
4643}
4644
4645/**
4646 *  s2io_isr - ISR handler of the device.
4647 *  @irq: the irq of the device.
4648 *  @dev_id: a void pointer to the dev structure of the NIC.
4649 *  Description:  This function is the ISR handler of the device. It
4650 *  identifies the reason for the interrupt and calls the relevant
4651 *  service routines. As a contingency measure, this ISR allocates the
4652 *  recv buffers, if their numbers are below the panic value which is
4653 *  presumed to have taken place in few seconds.
4654 *  Return value:
4655 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4656 *   IRQ_NONE: will be returned if interrupt is not from our device
4657 */
4658static irqreturn_t s2io_isr(int irq, void *dev_id)
4659{
4660 struct net_device *dev = (struct net_device *)dev_id;
4661 struct s2io_nic *sp = netdev_priv(dev);
4662 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4663 int i;
4664 u64 reason = 0;
4665 struct mac_info *mac_control;
4666 struct config_param *config;
4667
4668	/* Pretend we handled any irq's from a disconnected card */
4669 if (pci_channel_offline(sp->pdev))
4670 return IRQ_NONE;
4671
4672 if (!is_s2io_card_up(sp))
4673 return IRQ_NONE;
4674
4675 config = &sp->config;
4676 mac_control = &sp->mac_control;
4677
4678	/*
4679	 * Identify the cause for interrupt and call the appropriate
4680	 * interrupt handler. Causes for the interrupt could be;
4681	 * 1. Rx of packet.
4682	 * 2. Tx complete.
4683	 * 3. Link down.
4684	 */
4685 reason = readq(&bar0->general_int_status);
4686
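	/* A read of all 1's generally means the card no longer responds
	 * on the bus (e.g. surprise removal); treat it as handled.
	 */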
4687 if (unlikely(reason == S2IO_MINUS_ONE))
4688 return IRQ_HANDLED;
4689
4690 if (reason &
4691 (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
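		/* mask all interrupt sources until this one is serviced */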
4692 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4693
4694 if (config->napi) {
4695 if (reason & GEN_INTR_RXTRAFFIC) {
4696 napi_schedule(&sp->napi);
4697 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4698 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4699 readl(&bar0->rx_traffic_int);
4700 }
4701 } else {
4702			/*
4703			 * rx_traffic_int reg is an R1 register, writing all 1's
4704			 * will ensure that the actual interrupt causing bit
4705			 * gets cleared and hence a read can be avoided.
4706			 */
4707 if (reason & GEN_INTR_RXTRAFFIC)
4708 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4709
4710 for (i = 0; i < config->rx_ring_num; i++) {
4711 struct ring_info *ring = &mac_control->rings[i];
4712
4713 rx_intr_handler(ring, 0);
4714 }
4715 }
4716
4717		/*
4718		 * tx_traffic_int reg is an R1 register, writing all 1's
4719		 * will ensure that the actual interrupt causing bit
4720		 * gets cleared and hence a read can be avoided.
4721		 */
4722 if (reason & GEN_INTR_TXTRAFFIC)
4723 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4724
4725 for (i = 0; i < config->tx_fifo_num; i++)
4726 tx_intr_handler(&mac_control->fifos[i]);
4727
4728 if (reason & GEN_INTR_TXPIC)
4729 s2io_txpic_intr_handle(sp);
4730
4731		/*
4732		 * Reallocate the buffers from the interrupt handler itself.
4733		 */
4734 if (!config->napi) {
4735 for (i = 0; i < config->rx_ring_num; i++) {
4736 struct ring_info *ring = &mac_control->rings[i];
4737
4738 s2io_chk_rx_buffers(sp, ring);
4739 }
4740 }
4741 writeq(sp->general_int_mask, &bar0->general_int_mask);
4742 readl(&bar0->general_int_status);
4743
4744 return IRQ_HANDLED;
4745
4746 } else if (!reason) {
4747		/* The interrupt was not raised by us */
4748 return IRQ_NONE;
4749 }
4750
4751 return IRQ_HANDLED;
4752}
4753
4754/*
4755 * s2io_updt_stats - DMA the hardware statistics block into host memory
4756 */
4757static void s2io_updt_stats(struct s2io_nic *sp)
4758{
4759 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4760 u64 val64;
4761 int cnt = 0;
4762
4763 if (is_s2io_card_up(sp)) {
4764		/* Apprx 30us on a 133 MHz bus */
4765 val64 = SET_UPDT_CLICKS(10) |
4766 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4767 writeq(val64, &bar0->stat_cfg);
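		/* poll (at most 5 x 100 us) for the strobe bit to clear */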
4768 do {
4769 udelay(100);
4770 val64 = readq(&bar0->stat_cfg);
4771 if (!(val64 & s2BIT(0)))
4772 break;
4773 cnt++;
4774 if (cnt == 5)
4775 break;
4776 } while (1);
4777 }
4778}
4779
4780/**
4781 *  s2io_get_stats - Updates the device statistics structure.
4782 *  @dev : pointer to the device structure.
4783 *  Description:
4784 *  This function updates the device statistics structure in the s2io_nic
4785 *  structure and returns a pointer to the same.
4786 *  Return value:
4787 *  pointer to the updated net_device_stats structure.
4788 */
4789static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4790{
4791 struct s2io_nic *sp = netdev_priv(dev);
4792 struct mac_info *mac_control = &sp->mac_control;
4793 struct stat_block *stats = mac_control->stats_info;
4794 u64 delta;
4795
4796	/* Configure Stats for immediate updt */
4797 s2io_updt_stats(sp);
4798
4799	/* A device reset (for example on an MTU change while running)
4800	 * zeroes the on-adapter counters.  The driver keeps a running
4801	 * copy in sp->stats and folds only the delta between the fresh
4802	 * hardware block and that copy into dev->stats, so the values
4803	 * reported to the stack stay monotonic across resets.
4804	 */
4805
4806 delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4807 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4808 sp->stats.rx_packets += delta;
4809 dev->stats.rx_packets += delta;
4810
4811 delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4812 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4813 sp->stats.tx_packets += delta;
4814 dev->stats.tx_packets += delta;
4815
4816 delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4817 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4818 sp->stats.rx_bytes += delta;
4819 dev->stats.rx_bytes += delta;
4820
4821 delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4822 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4823 sp->stats.tx_bytes += delta;
4824 dev->stats.tx_bytes += delta;
4825
4826 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4827 sp->stats.rx_errors += delta;
4828 dev->stats.rx_errors += delta;
4829
4830 delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4831 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4832 sp->stats.tx_errors += delta;
4833 dev->stats.tx_errors += delta;
4834
4835 delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4836 sp->stats.rx_dropped += delta;
4837 dev->stats.rx_dropped += delta;
4838
4839 delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4840 sp->stats.tx_dropped += delta;
4841 dev->stats.tx_dropped += delta;
4842
4843
4844	/* The adapter counts pause control frames as valid multicast
4845	 * frames (rmac_vld_mcst_frms), so subtract the pause count to
4846	 * report only genuine multicast traffic.
4847	 */
4848 delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4849 le32_to_cpu(stats->rmac_vld_mcst_frms);
4850 delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4851 delta -= sp->stats.multicast;
4852 sp->stats.multicast += delta;
4853 dev->stats.multicast += delta;
4854
4855 delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4856 le32_to_cpu(stats->rmac_usized_frms)) +
4857 le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4858 sp->stats.rx_length_errors += delta;
4859 dev->stats.rx_length_errors += delta;
4860
4861 delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4862 sp->stats.rx_crc_errors += delta;
4863 dev->stats.rx_crc_errors += delta;
4864
4865 return &dev->stats;
4866}
4867
4868
4869/**
4870 *  s2io_set_multicast - entry point for multicast address enable/disable.
4871 *  @dev : pointer to the device structure
4872 *  Description:
4873 *  This function is a driver entry point which gets called by the kernel
4874 *  whenever multicast addresses must be enabled/disabled. This also gets
4875 *  called to set/reset promiscuous mode. Depending on the device flag, we
4876 *  determine, if multicast address must be enabled or if promiscuous mode
4877 *  is to be disabled etc.
4878 *  Return value:
4879 *  void.
4880 */
4881static void s2io_set_multicast(struct net_device *dev)
4882{
4883 int i, j, prev_cnt;
4884 struct netdev_hw_addr *ha;
4885 struct s2io_nic *sp = netdev_priv(dev);
4886 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4887 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4888 0xfeffffffffffULL;
4889 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4890 void __iomem *add;
4891 struct config_param *config = &sp->config;
4892
4893 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4894		/* Enable all Multicast addresses */
4895 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4896 &bar0->rmac_addr_data0_mem);
4897 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4898 &bar0->rmac_addr_data1_mem);
4899 val64 = RMAC_ADDR_CMD_MEM_WE |
4900 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4901 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4902 writeq(val64, &bar0->rmac_addr_cmd_mem);
4903
4904 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4905 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4906 S2IO_BIT_RESET);
4907
4908 sp->m_cast_flg = 1;
4909 sp->all_multi_pos = config->max_mc_addr - 1;
4910 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4911		/* Disable all Multicast addresses */
4912 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4913 &bar0->rmac_addr_data0_mem);
4914 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4915 &bar0->rmac_addr_data1_mem);
4916 val64 = RMAC_ADDR_CMD_MEM_WE |
4917 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4918 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4919 writeq(val64, &bar0->rmac_addr_cmd_mem);
4920
4921 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4922 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4923 S2IO_BIT_RESET);
4924
4925 sp->m_cast_flg = 0;
4926 sp->all_multi_pos = 0;
4927 }
4928
4929 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4930		/* Put the NIC into promiscuous mode */
4931 add = &bar0->mac_cfg;
4932 val64 = readq(&bar0->mac_cfg);
4933 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4934
4935 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4936 writel((u32)val64, add);
4937 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4938 writel((u32) (val64 >> 32), (add + 4));
4939
4940 if (vlan_tag_strip != 1) {
4941 val64 = readq(&bar0->rx_pa_cfg);
4942 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4943 writeq(val64, &bar0->rx_pa_cfg);
4944 sp->vlan_strip_flag = 0;
4945 }
4946
4947 val64 = readq(&bar0->mac_cfg);
4948 sp->promisc_flg = 1;
4949 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4950 dev->name);
4951 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4952		/* Remove the NIC from promiscuous mode */
4953 add = &bar0->mac_cfg;
4954 val64 = readq(&bar0->mac_cfg);
4955 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4956
4957 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4958 writel((u32)val64, add);
4959 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4960 writel((u32) (val64 >> 32), (add + 4));
4961
4962 if (vlan_tag_strip != 0) {
4963 val64 = readq(&bar0->rx_pa_cfg);
4964 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4965 writeq(val64, &bar0->rx_pa_cfg);
4966 sp->vlan_strip_flag = 1;
4967 }
4968
4969 val64 = readq(&bar0->mac_cfg);
4970 sp->promisc_flg = 0;
4971 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4972 }
4973
4974	/* Update individual M_CAST address list */
4975 if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4976 if (netdev_mc_count(dev) >
4977 (config->max_mc_addr - config->max_mac_addr)) {
4978 DBG_PRINT(ERR_DBG,
4979 "%s: No more Rx filters can be added - "
4980 "please enable ALL_MULTI instead\n",
4981 dev->name);
4982 return;
4983 }
4984
4985 prev_cnt = sp->mc_addr_count;
4986 sp->mc_addr_count = netdev_mc_count(dev);
4987
4988		/* Clear the previous list of Mc in the H/W. */
4989 for (i = 0; i < prev_cnt; i++) {
4990 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4991 &bar0->rmac_addr_data0_mem);
4992 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4993 &bar0->rmac_addr_data1_mem);
4994 val64 = RMAC_ADDR_CMD_MEM_WE |
4995 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4996 RMAC_ADDR_CMD_MEM_OFFSET
4997 (config->mc_start_offset + i);
4998 writeq(val64, &bar0->rmac_addr_cmd_mem);
4999
5000
5001 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5002 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5003 S2IO_BIT_RESET)) {
5004 DBG_PRINT(ERR_DBG,
5005 "%s: Adding Multicasts failed\n",
5006 dev->name);
5007 return;
5008 }
5009 }
5010
5011		/* Create the new Rx filter list and update the same in H/W. */
5012 i = 0;
5013 netdev_for_each_mc_addr(ha, dev) {
5014 mac_addr = 0;
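			/* pack the six address bytes MSB-first into the
			 * low 48 bits of mac_addr
			 */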
5015 for (j = 0; j < ETH_ALEN; j++) {
5016 mac_addr |= ha->addr[j];
5017 mac_addr <<= 8;
5018 }
5019 mac_addr >>= 8;
5020 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5021 &bar0->rmac_addr_data0_mem);
5022 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5023 &bar0->rmac_addr_data1_mem);
5024 val64 = RMAC_ADDR_CMD_MEM_WE |
5025 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5026 RMAC_ADDR_CMD_MEM_OFFSET
5027 (i + config->mc_start_offset);
5028 writeq(val64, &bar0->rmac_addr_cmd_mem);
5029
5030
5031 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5032 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5033 S2IO_BIT_RESET)) {
5034 DBG_PRINT(ERR_DBG,
5035 "%s: Adding Multicasts failed\n",
5036 dev->name);
5037 return;
5038 }
5039 i++;
5040 }
5041 }
5042}
5043
5044/* read from CAM unicast & multicast addresses and store it in
5045 * def_mac_addr structure
5046 */
5047static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5048{
5049 int offset;
5050 u64 mac_addr = 0x0;
5051 struct config_param *config = &sp->config;
5052
5053	/* store unicast & multicast mac addresses */
5054 for (offset = 0; offset < config->max_mc_addr; offset++) {
5055 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5056
5057 if (mac_addr == FAILURE)
5058 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5059 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5060 }
5061}
5062
5063/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5064static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5065{
5066 int offset;
5067 struct config_param *config = &sp->config;
5068
5069 for (offset = 0; offset < config->max_mac_addr; offset++)
5070 do_s2io_prog_unicast(sp->dev,
5071 sp->def_mac_addr[offset].mac_addr);
5072
5073	/* restore multicast mac address */
5074 for (offset = config->mc_start_offset;
5075 offset < config->max_mc_addr; offset++)
5076 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5077}
5078
5079/* add a multicast MAC address to CAM */
5080static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5081{
5082 int i;
5083 u64 mac_addr = 0;
5084 struct config_param *config = &sp->config;
5085
5086 for (i = 0; i < ETH_ALEN; i++) {
5087 mac_addr <<= 8;
5088 mac_addr |= addr[i];
5089 }
5090 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5091 return SUCCESS;
5092
5093	/* check if the multicast mac is already present in CAM */
5094 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5095 u64 tmp64;
5096 tmp64 = do_s2io_read_unicast_mc(sp, i);
5097 if (tmp64 == S2IO_DISABLE_MAC_ENTRY)
5098 break;
5099
5100 if (tmp64 == mac_addr)
5101 return SUCCESS;
5102 }
5103 if (i == config->max_mc_addr) {
5104 DBG_PRINT(ERR_DBG,
5105 "CAM full no space left for multicast MAC\n");
5106 return FAILURE;
5107 }
5108	/* Update the internal structure with this new mac address */
5109 do_s2io_copy_mac_addr(sp, i, mac_addr);
5110
5111 return do_s2io_add_mac(sp, mac_addr, i);
5112}
5113
5114/* add MAC address to CAM */
5115static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5116{
5117 u64 val64;
5118 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5119
5120 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5121 &bar0->rmac_addr_data0_mem);
5122
5123 val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5124 RMAC_ADDR_CMD_MEM_OFFSET(off);
5125 writeq(val64, &bar0->rmac_addr_cmd_mem);
5126
5127	/* Wait till command completes */
5128 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5129 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5130 S2IO_BIT_RESET)) {
5131 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5132 return FAILURE;
5133 }
5134 return SUCCESS;
5135}
5136/* deletes a specified unicast/multicast mac entry from CAM */
5137static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5138{
5139 int offset;
5140 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5141 struct config_param *config = &sp->config;
5142
5143 for (offset = 1;
5144 offset < config->max_mc_addr; offset++) {
5145 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5146 if (tmp64 == addr) {
5147			/* disable the entry by overwriting it with
5148			 * S2IO_DISABLE_MAC_ENTRY
5149			 */
5148 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5149 return FAILURE;
5150			/* re-sync def_mac_addr with the CAM contents */
5151 do_s2io_store_unicast_mc(sp);
5152 return SUCCESS;
5153 }
5154 }
5155 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5156 (unsigned long long)addr);
5157 return FAILURE;
5158}
5159
5160/* read mac entries from CAM */
5161static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5162{
5163 u64 tmp64, val64;
5164 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5165
5166	/* issue the read command for the CAM entry at this offset */
5167 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5168 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5169 writeq(val64, &bar0->rmac_addr_cmd_mem);
5170
5171	/* Wait till command completes */
5172 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5173 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5174 S2IO_BIT_RESET)) {
5175 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5176 return FAILURE;
5177 }
5178 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5179
5180 return tmp64 >> 16;
5181}
5182
5183
5184/**
5185 * s2io_set_mac_addr - driver entry point to change the MAC address
5186 */
5187static int s2io_set_mac_addr(struct net_device *dev, void *p)
5188{
5189 struct sockaddr *addr = p;
5190
5191 if (!is_valid_ether_addr(addr->sa_data))
5192 return -EADDRNOTAVAIL;
5193
5194 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5195
5196	/* store the MAC address in CAM */
5197 return do_s2io_prog_unicast(dev, dev->dev_addr);
5198}
5199
5200/**
5201 *  do_s2io_prog_unicast - Programs the Xframe mac address
5202 *  @dev : pointer to the device structure.
5203 *  @addr: a uchar pointer to the new mac address which is to be set.
5204 *  Description : This procedure will program the Xframe to receive
5205 *  frames with new Mac Address
5206 *  Return value: SUCCESS on success and an appropriate (-)ve integer
5207 *  as defined in errno.h file on failure.
5208 */
5209static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5210{
5211 struct s2io_nic *sp = netdev_priv(dev);
5212 register u64 mac_addr = 0, perm_addr = 0;
5213 int i;
5214 u64 tmp64;
5215 struct config_param *config = &sp->config;
5216
5217	/*
5218	 * Set the new MAC address as the new unicast filter and reflect this
5219	 * change on the device address registered with the OS. It will be
5220	 * at offset 0.
5221	 */
5222 for (i = 0; i < ETH_ALEN; i++) {
5223 mac_addr <<= 8;
5224 mac_addr |= addr[i];
5225 perm_addr <<= 8;
5226 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5227 }
5228
5229	/* check if the dev_addr is different from perm_addr */
5230 if (mac_addr == perm_addr)
5231 return SUCCESS;
5232
5233	/* check if the mac is already present in CAM */
5234 for (i = 1; i < config->max_mac_addr; i++) {
5235 tmp64 = do_s2io_read_unicast_mc(sp, i);
5236 if (tmp64 == S2IO_DISABLE_MAC_ENTRY)
5237 break;
5238
5239 if (tmp64 == mac_addr) {
5240 DBG_PRINT(INFO_DBG,
5241 "MAC addr:0x%llx already present in CAM\n",
5242 (unsigned long long)mac_addr);
5243 return SUCCESS;
5244 }
5245 }
5246 if (i == config->max_mac_addr) {
5247 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5248 return FAILURE;
5249 }
5250	/* Update the internal structure with this new mac address */
5251 do_s2io_copy_mac_addr(sp, i, mac_addr);
5252
5253 return do_s2io_add_mac(sp, mac_addr, i);
5254}
5255
5256
5257/**
5258 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5259 * @dev : pointer to netdev
5260 * @cmd : pointer to the structure with parameters given by ethtool to set
5261 * link information.
5262 * Description:
5263 * The function sets different link parameters provided by the user onto
5264 * the NIC.
5265 * Return value:
5266 * 0 on success.
5267 */
5268static int
5269s2io_ethtool_set_link_ksettings(struct net_device *dev,
5270 const struct ethtool_link_ksettings *cmd)
5271{
5272 struct s2io_nic *sp = netdev_priv(dev);
5273 if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5274 (cmd->base.speed != SPEED_10000) ||
5275 (cmd->base.duplex != DUPLEX_FULL))
5276		return -EINVAL;
5277
5278	s2io_close(sp->dev);
5279	s2io_open(sp->dev);
5281
5282 return 0;
5283}
5284
5285/**
5286 * s2io_ethtool_get_link_ksettings - Return link specific information.
5287 * @dev : pointer to netdev
5288 * @cmd : pointer to the structure with parameters given by ethtool
5289 * to return link information.
5290 * Description:
5291 * Returns link specific information like speed, duplex etc.. to ethtool.
5292 * The Xframe supports only 10 Gbps full duplex over fibre.
5293 * Return value :
5294 * return 0 on success.
5295 */
5296static int
5297s2io_ethtool_get_link_ksettings(struct net_device *dev,
5298 struct ethtool_link_ksettings *cmd)
5299{
5300 struct s2io_nic *sp = netdev_priv(dev);
5301
5302 ethtool_link_ksettings_zero_link_mode(cmd, supported);
5303 ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5304 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5305
5306 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5307 ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5308 ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5309
5310 cmd->base.port = PORT_FIBRE;
5311
5312 if (netif_carrier_ok(sp->dev)) {
5313 cmd->base.speed = SPEED_10000;
5314 cmd->base.duplex = DUPLEX_FULL;
5315 } else {
5316 cmd->base.speed = SPEED_UNKNOWN;
5317 cmd->base.duplex = DUPLEX_UNKNOWN;
5318 }
5319
5320 cmd->base.autoneg = AUTONEG_DISABLE;
5321 return 0;
5322}
5323
5324/**
5325 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5326 * @dev : pointer to netdev
5327 * @info : pointer to the structure with parameters given by ethtool to
5328 * return driver information.
5329 * Description:
5330 * Returns driver specific information like name, version etc.. to ethtool
5331 * (driver name, version and PCI bus info).
5332 * Return value:
5333 * void
5334 */
5335static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5336 struct ethtool_drvinfo *info)
5337{
5338 struct s2io_nic *sp = netdev_priv(dev);
5339
5340 strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5341 strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5342 strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5343}
5344
5345/**
5346 *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5347 *  @dev : pointer to netdev
5348 *  @regs : pointer to the structure with parameters given by ethtool for
5349 *  dumping the registers.
5350 *  @space : The input argument into which all the registers are dumped.
5351 *  Description:
5352 *  Dumps the entire register space of xFrame NIC into the user given
5353 *  buffer area.
5354 *  (the register space is read 64 bits at a time)
5355 *  Return value :
5356 *  void.
5357 */
5358static void s2io_ethtool_gregs(struct net_device *dev,
5359 struct ethtool_regs *regs, void *space)
5360{
5361 int i;
5362 u64 reg;
5363 u8 *reg_space = (u8 *)space;
5364 struct s2io_nic *sp = netdev_priv(dev);
5365
5366 regs->len = XENA_REG_SPACE;
5367 regs->version = sp->pdev->subsystem_device;
5368
5369 for (i = 0; i < regs->len; i += 8) {
5370 reg = readq(sp->bar0 + i);
5371		memcpy((reg_space + i), &reg, 8);
5372 }
5373}
5374
5375/*
5376 *  s2io_set_led - control NIC led
5377 */
5378static void s2io_set_led(struct s2io_nic *sp, bool on)
5379{
5380 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5381 u16 subid = sp->pdev->subsystem_device;
5382 u64 val64;
5383
5384 if ((sp->device_type == XFRAME_II_DEVICE) ||
5385 ((subid & 0xFF) >= 0x07)) {
5386 val64 = readq(&bar0->gpio_control);
5387 if (on)
5388 val64 |= GPIO_CTRL_GPIO_0;
5389 else
5390 val64 &= ~GPIO_CTRL_GPIO_0;
5391
5392 writeq(val64, &bar0->gpio_control);
5393 } else {
5394 val64 = readq(&bar0->adapter_control);
5395 if (on)
5396 val64 |= ADAPTER_LED_ON;
5397 else
5398 val64 &= ~ADAPTER_LED_ON;
5399
5400 writeq(val64, &bar0->adapter_control);
5401 }
5402
5403}
5404
5405/**
5406 * s2io_ethtool_set_led - To physically identify the nic on the system.
5407 * @dev : network device
5408 * @state: led setting
5409 *
5410 * Description: Used to physically identify the NIC on the system.
5411 * The Link LED will blink for a time specified by the user for
5412 * identification.
5413 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5414 * identification is possible only if it's possible to toggle the LED.
5415 * Return value: 0 on success
5416 */
5417static int s2io_ethtool_set_led(struct net_device *dev,
5418 enum ethtool_phys_id_state state)
5419{
5420 struct s2io_nic *sp = netdev_priv(dev);
5421 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5422 u16 subid = sp->pdev->subsystem_device;
5423
5424 if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5425 u64 val64 = readq(&bar0->adapter_control);
5426 if (!(val64 & ADAPTER_CNTL_EN)) {
5427 pr_err("Adapter Link down, cannot blink LED\n");
5428 return -EAGAIN;
5429 }
5430 }
5431
5432 switch (state) {
5433 case ETHTOOL_ID_ACTIVE:
5434 sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5435 return 1;
5436
5437 case ETHTOOL_ID_ON:
5438 s2io_set_led(sp, true);
5439 break;
5440
5441 case ETHTOOL_ID_OFF:
5442 s2io_set_led(sp, false);
5443 break;
5444
5445 case ETHTOOL_ID_INACTIVE:
5446 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5447 writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5448 }
5449
5450 return 0;
5451}
5452
5453static void s2io_ethtool_gringparam(struct net_device *dev,
5454 struct ethtool_ringparam *ering)
5455{
5456 struct s2io_nic *sp = netdev_priv(dev);
5457 int i, tx_desc_count = 0, rx_desc_count = 0;
5458
5459 if (sp->rxd_mode == RXD_MODE_1) {
5460 ering->rx_max_pending = MAX_RX_DESC_1;
5461 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5462 } else {
5463 ering->rx_max_pending = MAX_RX_DESC_2;
5464 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5465 }
5466
5467 ering->tx_max_pending = MAX_TX_DESC;
5468
5469 for (i = 0; i < sp->config.rx_ring_num; i++)
5470 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5471 ering->rx_pending = rx_desc_count;
5472 ering->rx_jumbo_pending = rx_desc_count;
5473
5474 for (i = 0; i < sp->config.tx_fifo_num; i++)
5475 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5476 ering->tx_pending = tx_desc_count;
5477 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5478}
5479
5480/**
5481 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5482 * @dev : pointer to netdev
5483 * @ep : pointer to the structure with pause parameters given by ethtool.
5484 * Description:
5485 * Returns the Pause frame generation and reception capability of the NIC.
5486 * Return value:
5487 *  void
5488 */
5489static void s2io_ethtool_getpause_data(struct net_device *dev,
5490 struct ethtool_pauseparam *ep)
5491{
5492 u64 val64;
5493 struct s2io_nic *sp = netdev_priv(dev);
5494 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5495
5496 val64 = readq(&bar0->rmac_pause_cfg);
5497 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5498 ep->tx_pause = true;
5499 if (val64 & RMAC_PAUSE_RX_ENABLE)
5500 ep->rx_pause = true;
5501 ep->autoneg = false;
5502}
5503
5504/**
5505 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5506 * @dev : pointer to netdev
5507 * @ep : pointer to the structure with pause parameters given by ethtool.
5508 * Description:
5509 * It can be used to set or reset Pause frame generation or reception
5510 * support of the NIC.
5511 * (autonegotiation of pause parameters is not supported)
5512 * Return value:
5513 * int, returns 0 on Success
5514 */
5515static int s2io_ethtool_setpause_data(struct net_device *dev,
5516 struct ethtool_pauseparam *ep)
5517{
5518 u64 val64;
5519 struct s2io_nic *sp = netdev_priv(dev);
5520 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5521
5522 val64 = readq(&bar0->rmac_pause_cfg);
5523 if (ep->tx_pause)
5524 val64 |= RMAC_PAUSE_GEN_ENABLE;
5525 else
5526 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5527 if (ep->rx_pause)
5528 val64 |= RMAC_PAUSE_RX_ENABLE;
5529 else
5530 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5531 writeq(val64, &bar0->rmac_pause_cfg);
5532 return 0;
5533}
5534
5535#define S2IO_DEV_ID 5
5536/**
5537 * read_eeprom - reads 4 bytes of data from user given offset.
5538 * @sp : private member of the device structure, which is a pointer to the
5539 *      s2io_nic structure.
5540 * @off : offset at which the data is to be read
5541 * @data : output parameter where the data read from the given
5542 *      offset is stored.
5543 * Description:
5544 * Will read 4 bytes of data from the user given offset and return the
5545 * read data.
5546 * NOTE: Will allow to read only part of the EEPROM visible through the
5547 *   I2C bus.
5548 * Return value:
5549 *  -1 on failure and 0 on success.
5550 */
5551static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5552{
5553 int ret = -1;
5554 u32 exit_cnt = 0;
5555 u64 val64;
5556 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5557
5558 if (sp->device_type == XFRAME_I_DEVICE) {
5559 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5560 I2C_CONTROL_ADDR(off) |
5561 I2C_CONTROL_BYTE_CNT(0x3) |
5562 I2C_CONTROL_READ |
5563 I2C_CONTROL_CNTL_START;
5564 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5565
5566 while (exit_cnt < 5) {
5567 val64 = readq(&bar0->i2c_control);
5568 if (I2C_CONTROL_CNTL_END(val64)) {
5569 *data = I2C_CONTROL_GET_DATA(val64);
5570 ret = 0;
5571 break;
5572 }
5573 msleep(50);
5574 exit_cnt++;
5575 }
5576 }
5577
5578 if (sp->device_type == XFRAME_II_DEVICE) {
5579 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5580 SPI_CONTROL_BYTECNT(0x3) |
5581 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5582 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5583 val64 |= SPI_CONTROL_REQ;
5584 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5585 while (exit_cnt < 5) {
5586 val64 = readq(&bar0->spi_control);
5587 if (val64 & SPI_CONTROL_NACK) {
5588 ret = 1;
5589 break;
5590 } else if (val64 & SPI_CONTROL_DONE) {
5591 *data = readq(&bar0->spi_data);
5592 *data &= 0xffffff;
5593 ret = 0;
5594 break;
5595 }
5596 msleep(50);
5597 exit_cnt++;
5598 }
5599 }
5600 return ret;
5601}
5602
5603/**
5604 *  write_eeprom - actually writes the relevant part of the data value.
5605 *  @sp : private member of the device structure, which is a pointer to the
5606 *       s2io_nic structure.
5607 *  @off : offset at which the data must be written
5608 *  @data : The data that is to be written
5609 *  @cnt : Number of bytes of the data that are actually to be written into
5610 *  the Eeprom. (max of 3)
5611 *  Description:
5612 *  Actually writes the relevant part of the data value into the Eeprom
5613 *  through the I2C bus.
5614 *  NOTE: on Xframe II the write goes through the SPI interface instead.
5615 *  Return value:
5616 *  0 on success, -1 on failure.
5617 */
5618static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5619{
5620 int exit_cnt = 0, ret = -1;
5621 u64 val64;
5622 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5623
5624 if (sp->device_type == XFRAME_I_DEVICE) {
5625 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5626 I2C_CONTROL_ADDR(off) |
5627 I2C_CONTROL_BYTE_CNT(cnt) |
5628 I2C_CONTROL_SET_DATA((u32)data) |
5629 I2C_CONTROL_CNTL_START;
5630 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5631
5632 while (exit_cnt < 5) {
5633 val64 = readq(&bar0->i2c_control);
5634 if (I2C_CONTROL_CNTL_END(val64)) {
5635 if (!(val64 & I2C_CONTROL_NACK))
5636 ret = 0;
5637 break;
5638 }
5639 msleep(50);
5640 exit_cnt++;
5641 }
5642 }
5643
5644 if (sp->device_type == XFRAME_II_DEVICE) {
5645 int write_cnt = (cnt == 8) ? 0 : cnt;
5646 writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5647
5648 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5649 SPI_CONTROL_BYTECNT(write_cnt) |
5650 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5651 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5652 val64 |= SPI_CONTROL_REQ;
5653 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5654 while (exit_cnt < 5) {
5655 val64 = readq(&bar0->spi_control);
5656 if (val64 & SPI_CONTROL_NACK) {
5657 ret = 1;
5658 break;
5659 } else if (val64 & SPI_CONTROL_DONE) {
5660 ret = 0;
5661 break;
5662 }
5663 msleep(50);
5664 exit_cnt++;
5665 }
5666 }
5667 return ret;
5668}
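
/*
 * s2io_vpd_read - reads the adapter's VPD area via PCI config space to
 * recover the product name and serial number strings.
 */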
5669static void s2io_vpd_read(struct s2io_nic *nic)
5670{
5671 u8 *vpd_data;
5672 u8 data;
5673 int i = 0, cnt, len, fail = 0;
5674 int vpd_addr = 0x80;
5675 struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;
5676
5677 if (nic->device_type == XFRAME_II_DEVICE) {
5678 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5679 vpd_addr = 0x80;
5680 } else {
5681 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5682 vpd_addr = 0x50;
5683 }
5684 strcpy(nic->serial_num, "NOT AVAILABLE");
5685
5686 vpd_data = kmalloc(256, GFP_KERNEL);
5687 if (!vpd_data) {
5688 swstats->mem_alloc_fail_cnt++;
5689 return;
5690 }
5691 swstats->mem_allocated += 256;
5692
5693 for (i = 0; i < 256; i += 4) {
5694 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5695 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5696 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5697 for (cnt = 0; cnt < 5; cnt++) {
5698 msleep(2);
5699 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5700 if (data == 0x80)
5701 break;
5702 }
5703 if (cnt >= 5) {
5704 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5705 fail = 1;
5706 break;
5707 }
5708 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5709 (u32 *)&vpd_data[i]);
5710 }
5711
5712 if (!fail) {
5713
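		/* scan for the 'SN' keyword and copy out the serial number */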
5714 for (cnt = 0; cnt < 252; cnt++) {
5715 if ((vpd_data[cnt] == 'S') &&
5716 (vpd_data[cnt+1] == 'N')) {
5717 len = vpd_data[cnt+2];
5718 if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
5719 memcpy(nic->serial_num,
5720 &vpd_data[cnt + 3],
5721 len);
5722 memset(nic->serial_num+len,
5723 0,
5724 VPD_STRING_LEN-len);
5725 break;
5726 }
5727 }
5728 }
5729 }
5730
5731 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5732 len = vpd_data[1];
5733 memcpy(nic->product_name, &vpd_data[3], len);
5734 nic->product_name[len] = 0;
5735 }
5736 kfree(vpd_data);
5737 swstats->mem_freed += 256;
5738}
5739
5740/**
5741 *  s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5742 *  @dev : pointer to netdev
5743 *  @eeprom : pointer to the user level structure provided by ethtool,
5744 *  containing all relevant information.
5745 *  @data_buf : output buffer into which the EEPROM contents are copied.
5746 *  Description: Reads the values stored in the Eeprom at given offset
5747 *  for a given length. Stores these values in the input argument data
5748 *  buffer 'data_buf' and returns these to the caller (ethtool.)
5749 *  Returns -EFAULT if a read from the EEPROM fails.
5750 *  Return value:
5751 *  int  0 on success
5752 */
5753static int s2io_ethtool_geeprom(struct net_device *dev,
5754 struct ethtool_eeprom *eeprom, u8 * data_buf)
5755{
5756 u32 i, valid;
5757 u64 data;
5758 struct s2io_nic *sp = netdev_priv(dev);
5759
5760 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5761
5762 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5763 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5764	/* each read returns 4 bytes; INV() byte-swaps the 32-bit word */
5765 for (i = 0; i < eeprom->len; i += 4) {
5766 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5767 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5768 return -EFAULT;
5769 }
5770 valid = INV(data);
5771 memcpy((data_buf + i), &valid, 4);
5772 }
5773 return 0;
5774}
5775
5776/**
5777 *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5778 *  @dev : pointer to netdev
5779 *  @eeprom : pointer to the user structure provided by ethtool, containing
5780 *  all relevant information.
5781 *  @data_buf : user defined value to be written into Eeprom.
5782 *  Description:
5783 *  Tries to write the user provided value in the Eeprom, at the offset
5784 *  given by the user.
5785 *  The magic value must match the device's vendor/device id pair.
5786 *  Return value:
5787 *  0 on success, -EFAULT on failure.
5788 */
5789static int s2io_ethtool_seeprom(struct net_device *dev,
5790 struct ethtool_eeprom *eeprom,
5791 u8 *data_buf)
5792{
5793 int len = eeprom->len, cnt = 0;
5794 u64 valid = 0, data;
5795 struct s2io_nic *sp = netdev_priv(dev);
5796
5797 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5798 DBG_PRINT(ERR_DBG,
5799 "ETHTOOL_WRITE_EEPROM Err: "
5800 "Magic value is wrong, it is 0x%x should be 0x%x\n",
5801 (sp->pdev->vendor | (sp->pdev->device << 16)),
5802 eeprom->magic);
5803 return -EFAULT;
5804 }
5805
5806 while (len) {
5807 data = (u32)data_buf[cnt] & 0x000000FF;
5808 if (data)
5809 valid = (u32)(data << 24);
5810 else
5811 valid = data;
5812
5813 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5814 DBG_PRINT(ERR_DBG,
5815 "ETHTOOL_WRITE_EEPROM Err: "
5816 "Cannot write into the specified offset\n");
5817 return -EFAULT;
5818 }
5819 cnt++;
5820 len--;
5821 }
5822
5823 return 0;
5824}
5825
5826/**
5827 * s2io_register_test - reads and writes into all clock domains.
5828 * @sp : private member of the device structure, which is a pointer to the
5829 * s2io_nic structure.
5830 * @data : variable that returns the result of each of the test conducted by
5831 * the driver.
5832 * Description:
5833 * Read and write into all clock domains. The NIC has 3 clock domains,
5834 * see that registers in all the three regions are accessible.
5835 * Also write/read checks the xmsi_data register.
5836 * Return value:
5837 * 0 on success.
5838 */
5839static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5840{
5841 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5842 u64 val64 = 0, exp_val;
5843 int fail = 0;
5844
5845 val64 = readq(&bar0->pif_rd_swapper_fb);
5846 if (val64 != 0x123456789abcdefULL) {
5847 fail = 1;
5848 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5849 }
5850
5851 val64 = readq(&bar0->rmac_pause_cfg);
5852 if (val64 != 0xc000ffff00000000ULL) {
5853 fail = 1;
5854 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5855 }
5856
5857 val64 = readq(&bar0->rx_queue_cfg);
5858 if (sp->device_type == XFRAME_II_DEVICE)
5859 exp_val = 0x0404040404040404ULL;
5860 else
5861 exp_val = 0x0808080808080808ULL;
5862 if (val64 != exp_val) {
5863 fail = 1;
5864 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5865 }
5866
5867 val64 = readq(&bar0->xgxs_efifo_cfg);
5868 if (val64 != 0x000000001923141EULL) {
5869 fail = 1;
5870 DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5871 }
5872
5873 val64 = 0x5A5A5A5A5A5A5A5AULL;
5874 writeq(val64, &bar0->xmsi_data);
5875 val64 = readq(&bar0->xmsi_data);
5876 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5877 fail = 1;
5878 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5879 }
5880
5881 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5882 writeq(val64, &bar0->xmsi_data);
5883 val64 = readq(&bar0->xmsi_data);
5884 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5885 fail = 1;
5886 DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5887 }
5888
5889 *data = fail;
5890 return fail;
5891}
5892
5893/**
5894 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5895 * @sp : private member of the device structure, which is a pointer to the
5896 * s2io_nic structure.
5897 * @data : variable that returns the result of each of the test conducted by
5898 * the driver.
5899 * Description:
5900 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5901 * register.
5902 * Original values at the tested offsets are saved and restored.
5903 * Return value:
5904 * 0 on success.
5905 */
5906static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5907{
5908 int fail = 0;
5909 u64 ret_data, org_4F0, org_7F0;
5910 u8 saved_4F0 = 0, saved_7F0 = 0;
5911 struct net_device *dev = sp->dev;
5912
5913	/* Test Write Error at offset 0 */
5914	/* Note that SPI interface allows write access to all areas
5915	 * of EEPROM. Hence doing all negative testing only for Xframe I.
5916	 */
5917 if (sp->device_type == XFRAME_I_DEVICE)
5918 if (!write_eeprom(sp, 0, 0, 3))
5919 fail = 1;
5920
5921	/* Save current values at offsets 0x4F0 and 0x7F0 */
5922	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5923		saved_4F0 = 1;
5924	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5925		saved_7F0 = 1;
5926
5927	/* Test Write at offset 4f0 */
5928 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5929 fail = 1;
5930 if (read_eeprom(sp, 0x4F0, &ret_data))
5931 fail = 1;
5932
5933 if (ret_data != 0x012345) {
5934 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5935 "Data written %llx Data read %llx\n",
5936 dev->name, (unsigned long long)0x12345,
5937 (unsigned long long)ret_data);
5938 fail = 1;
5939 }
5940
5941	/* Reset the EEPROM data to FFFF */
5942	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5943
5944	/* Test Write Request Error at offset 0x7c */
5945	if (sp->device_type == XFRAME_I_DEVICE)
5946		if (!write_eeprom(sp, 0x07C, 0, 3))
5947			fail = 1;
5948
5949	/* Test Write Request at offset 0x7f0 */
5950 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5951 fail = 1;
5952 if (read_eeprom(sp, 0x7F0, &ret_data))
5953 fail = 1;
5954
5955 if (ret_data != 0x012345) {
5956 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5957 "Data written %llx Data read %llx\n",
5958 dev->name, (unsigned long long)0x12345,
5959 (unsigned long long)ret_data);
5960 fail = 1;
5961 }
5962
5963	/* Reset the EEPROM data to FFFF */
5964	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5965
5966	if (sp->device_type == XFRAME_I_DEVICE) {
5967		/* Test Write Error at offset 0x80 */
5968		if (!write_eeprom(sp, 0x080, 0, 3))
5969			fail = 1;
5970
5971		/* Test Write Error at offset 0xfc */
5972		if (!write_eeprom(sp, 0x0FC, 0, 3))
5973			fail = 1;
5974
5975		/* Test Write Error at offset 0x100 */
5976		if (!write_eeprom(sp, 0x100, 0, 3))
5977			fail = 1;
5978
5979		/* Test Write Error at offset 4ec */
5980		if (!write_eeprom(sp, 0x4EC, 0, 3))
5981			fail = 1;
5982	}
5983
5984	/* Restore values at offsets 0x4F0 and 0x7F0 */
5985 if (saved_4F0)
5986 write_eeprom(sp, 0x4F0, org_4F0, 3);
5987 if (saved_7F0)
5988 write_eeprom(sp, 0x7F0, org_7F0, 3);
5989
5990 *data = fail;
5991 return fail;
5992}
5993
5994
5995/**
5996 * s2io_bist_test - invokes the MemBist test of the card.
5997 * @sp : private member of the device structure, which is a pointer to the
5998 * s2io_nic structure.
5999 * @data : variable that returns the result of each of the test conducted by
6000 * the driver.
6001 * Description:
6002 * This invokes the MemBist test of the card. We give around
6003 * 2 secs time for the Test to complete. If it's still not complete
6004 * within this period, we consider that the test failed.
6005 * Return value:
6006 * 0 on success and -1 on failure.
6007 */
6008static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6009{
6010 u8 bist = 0;
6011 int cnt = 0, ret = -1;
6012
6013 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6014 bist |= PCI_BIST_START;
6015	pci_write_config_byte(sp->pdev, PCI_BIST, bist); /* PCI_BIST is byte-wide */
6016
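	/* allow up to two seconds (20 polls, 100 ms apart) for BIST */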
6017 while (cnt < 20) {
6018 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6019 if (!(bist & PCI_BIST_START)) {
6020 *data = (bist & PCI_BIST_CODE_MASK);
6021 ret = 0;
6022 break;
6023 }
6024 msleep(100);
6025 cnt++;
6026 }
6027
6028 return ret;
6029}
6030
6031/**
6032 * s2io_link_test - verifies the link state of the nic
6033 * @sp : private member of the device structure, which is a pointer to the
6034 * s2io_nic structure.
6035 * @data : variable that returns the result of each of the test conducted by
6036 * the driver.
6037 * Description:
6038 * The function verifies the link state of the NIC and updates the input
6039 * argument 'data' appropriately: 0 when the link is up, 1 when it is
6040 * down.
6041 * Return value:
6042 * 0 on success.
6043 */
6044static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6045{
6046 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6047 u64 val64;
6048
6049 val64 = readq(&bar0->adapter_status);
6050 if (!(LINK_IS_UP(val64)))
6051 *data = 1;
6052 else
6053 *data = 0;
6054
6055 return *data;
6056}
6057
6058/**
6059 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6060 * @sp : private member of the device structure, which is a pointer to the
6061 * s2io_nic structure.
6062 * @data : variable that returns the result of each of the test
6063 * conducted by the driver.
6064 * Description:
6065 * This is one of the offline tests that verifies read and write
6066 * access to the RldRam chip on the NIC.  The memory is written in
6067 * two passes with complementary patterns and read back.
6068 * Return value:
6069 * 0 on success.
6070 */
6071static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6072{
6073 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6074 u64 val64;
6075 int cnt, iteration = 0, test_fail = 0;
6076
6077 val64 = readq(&bar0->adapter_control);
6078 val64 &= ~ADAPTER_ECC_EN;
6079 writeq(val64, &bar0->adapter_control);
6080
6081 val64 = readq(&bar0->mc_rldram_test_ctrl);
6082 val64 |= MC_RLDRAM_TEST_MODE;
6083 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6084
6085 val64 = readq(&bar0->mc_rldram_mrs);
6086 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6087 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6088
6089 val64 |= MC_RLDRAM_MRS_ENABLE;
6090 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6091
6092 while (iteration < 2) {
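		/* pass 0 writes the test patterns; pass 1 flips their
		 * upper 48 bits
		 */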
6093 val64 = 0x55555555aaaa0000ULL;
6094 if (iteration == 1)
6095 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6096 writeq(val64, &bar0->mc_rldram_test_d0);
6097
6098 val64 = 0xaaaa5a5555550000ULL;
6099 if (iteration == 1)
6100 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6101 writeq(val64, &bar0->mc_rldram_test_d1);
6102
6103 val64 = 0x55aaaaaaaa5a0000ULL;
6104 if (iteration == 1)
6105 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6106 writeq(val64, &bar0->mc_rldram_test_d2);
6107
6108 val64 = (u64) (0x0000003ffffe0100ULL);
6109 writeq(val64, &bar0->mc_rldram_test_add);
6110
6111 val64 = MC_RLDRAM_TEST_MODE |
6112 MC_RLDRAM_TEST_WRITE |
6113 MC_RLDRAM_TEST_GO;
6114 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6115
6116 for (cnt = 0; cnt < 5; cnt++) {
6117 val64 = readq(&bar0->mc_rldram_test_ctrl);
6118 if (val64 & MC_RLDRAM_TEST_DONE)
6119 break;
6120 msleep(200);
6121 }
6122
6123 if (cnt == 5)
6124 break;
6125
6126 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6127 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6128
6129 for (cnt = 0; cnt < 5; cnt++) {
6130 val64 = readq(&bar0->mc_rldram_test_ctrl);
6131 if (val64 & MC_RLDRAM_TEST_DONE)
6132 break;
6133 msleep(500);
6134 }
6135
6136 if (cnt == 5)
6137 break;
6138
6139 val64 = readq(&bar0->mc_rldram_test_ctrl);
6140 if (!(val64 & MC_RLDRAM_TEST_PASS))
6141 test_fail = 1;
6142
6143 iteration++;
6144 }
6145
6146 *data = test_fail;
6147
6148	/* Bring the adapter out of test mode */
6149 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6150
6151 return test_fail;
6152}
6153
6154
6155/**
6156 *  s2io_ethtool_test - conducts the self-tests to determine card health.
6157 *  @dev : pointer to netdev
6158 *  @ethtest : pointer to a ethtool command specific structure that will be
6159 *  returned to the user.
6160 *  @data : variable that returns the result of each of the test
6161 *  conducted by the driver.
6162 *  Description:
6163 *  This function conducts 5 tests (4 offline and 1 online) to determine
6164 *  the health of the card.
6165 *  Return value:
6166 *  void
6167 */
6168static void s2io_ethtool_test(struct net_device *dev,
6169 struct ethtool_test *ethtest,
6170 uint64_t *data)
6171{
6172 struct s2io_nic *sp = netdev_priv(dev);
6173 int orig_state = netif_running(sp->dev);
6174
6175 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6176		/* Offline Tests. */
6177 if (orig_state)
6178 s2io_close(sp->dev);
6179
6180 if (s2io_register_test(sp, &data[0]))
6181 ethtest->flags |= ETH_TEST_FL_FAILED;
6182
6183 s2io_reset(sp);
6184
6185 if (s2io_rldram_test(sp, &data[3]))
6186 ethtest->flags |= ETH_TEST_FL_FAILED;
6187
6188 s2io_reset(sp);
6189
6190 if (s2io_eeprom_test(sp, &data[1]))
6191 ethtest->flags |= ETH_TEST_FL_FAILED;
6192
6193 if (s2io_bist_test(sp, &data[4]))
6194 ethtest->flags |= ETH_TEST_FL_FAILED;
6195
6196 if (orig_state)
6197 s2io_open(sp->dev);
6198
6199 data[2] = 0;
6200 } else {
6201		/* Online Tests. */
6202 if (!orig_state) {
6203 DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6204 dev->name);
6205 data[0] = -1;
6206 data[1] = -1;
6207 data[2] = -1;
6208 data[3] = -1;
6209 data[4] = -1;
6210 }
6211
6212 if (s2io_link_test(sp, &data[2]))
6213 ethtest->flags |= ETH_TEST_FL_FAILED;
6214
6215 data[0] = 0;
6216 data[1] = 0;
6217 data[3] = 0;
6218 data[4] = 0;
6219 }
6220}
6221
6222static void s2io_get_ethtool_stats(struct net_device *dev,
6223 struct ethtool_stats *estats,
6224 u64 *tmp_stats)
6225{
6226 int i = 0, k;
6227 struct s2io_nic *sp = netdev_priv(dev);
6228 struct stat_block *stats = sp->mac_control.stats_info;
6229 struct swStat *swstats = &stats->sw_stat;
6230 struct xpakStat *xstats = &stats->xpak_stat;
6231
6232 s2io_updt_stats(sp);
6233 tmp_stats[i++] =
6234 (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
6235 le32_to_cpu(stats->tmac_frms);
6236 tmp_stats[i++] =
6237 (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
6238 le32_to_cpu(stats->tmac_data_octets);
6239 tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
6240 tmp_stats[i++] =
6241 (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
6242 le32_to_cpu(stats->tmac_mcst_frms);
6243 tmp_stats[i++] =
6244 (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
6245 le32_to_cpu(stats->tmac_bcst_frms);
6246 tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
6247 tmp_stats[i++] =
6248 (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
6249 le32_to_cpu(stats->tmac_ttl_octets);
6250 tmp_stats[i++] =
6251 (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
6252 le32_to_cpu(stats->tmac_ucst_frms);
6253 tmp_stats[i++] =
6254 (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
6255 le32_to_cpu(stats->tmac_nucst_frms);
6256 tmp_stats[i++] =
6257 (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
6258 le32_to_cpu(stats->tmac_any_err_frms);
6259 tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
6260 tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
6261 tmp_stats[i++] =
6262 (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
6263 le32_to_cpu(stats->tmac_vld_ip);
6264 tmp_stats[i++] =
6265 (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
6266 le32_to_cpu(stats->tmac_drop_ip);
6267 tmp_stats[i++] =
6268 (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
6269 le32_to_cpu(stats->tmac_icmp);
6270 tmp_stats[i++] =
6271 (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
6272 le32_to_cpu(stats->tmac_rst_tcp);
6273 tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
6274 tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
6275 le32_to_cpu(stats->tmac_udp);
6276 tmp_stats[i++] =
6277 (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
6278 le32_to_cpu(stats->rmac_vld_frms);
6279 tmp_stats[i++] =
6280 (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
6281 le32_to_cpu(stats->rmac_data_octets);
6282 tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
6283 tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
6284 tmp_stats[i++] =
6285 (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
6286 le32_to_cpu(stats->rmac_vld_mcst_frms);
6287 tmp_stats[i++] =
6288 (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
6289 le32_to_cpu(stats->rmac_vld_bcst_frms);
6290 tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
6291 tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
6292 tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
6293 tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
6294 tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
6295 tmp_stats[i++] =
6296 (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
6297 le32_to_cpu(stats->rmac_ttl_octets);
6298 tmp_stats[i++] =
6299 (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
6300 | le32_to_cpu(stats->rmac_accepted_ucst_frms);
6301 tmp_stats[i++] =
6302 (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
6303 << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
6304 tmp_stats[i++] =
6305 (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
6306 le32_to_cpu(stats->rmac_discarded_frms);
6307 tmp_stats[i++] =
6308 (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
6309 << 32 | le32_to_cpu(stats->rmac_drop_events);
6310 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
6311 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
6312 tmp_stats[i++] =
6313 (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
6314 le32_to_cpu(stats->rmac_usized_frms);
6315 tmp_stats[i++] =
6316 (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
6317 le32_to_cpu(stats->rmac_osized_frms);
6318 tmp_stats[i++] =
6319 (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
6320 le32_to_cpu(stats->rmac_frag_frms);
6321 tmp_stats[i++] =
6322 (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
6323 le32_to_cpu(stats->rmac_jabber_frms);
6324 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
6325 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
6326 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
6327 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
6328 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
6329 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
6330 tmp_stats[i++] =
6331 (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
6332 le32_to_cpu(stats->rmac_ip);
6333 tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
6334 tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
6335 tmp_stats[i++] =
6336 (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
6337 le32_to_cpu(stats->rmac_drop_ip);
6338 tmp_stats[i++] =
6339 (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
6340 le32_to_cpu(stats->rmac_icmp);
6341 tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
6342 tmp_stats[i++] =
6343 (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
6344 le32_to_cpu(stats->rmac_udp);
6345 tmp_stats[i++] =
6346 (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
6347 le32_to_cpu(stats->rmac_err_drp_udp);
6348 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
6349 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
6350 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
6351 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
6352 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
6353 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
6354 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
6355 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
6356 tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
6357 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
6358 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
6359 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
6360 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
6361 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
6362 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
6363 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
6364 tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
6365 tmp_stats[i++] =
6366 (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
6367 le32_to_cpu(stats->rmac_pause_cnt);
6368 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
6369 tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
6370 tmp_stats[i++] =
6371 (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
6372 le32_to_cpu(stats->rmac_accepted_ip);
6373 tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
6374 tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
6375 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
6376 tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
6377 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
6378 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
6379 tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
6380 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
6381 tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
6382 tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
6383 tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
6384 tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
6385 tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
6386 tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
6387 tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
6388 tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
6389 tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
6390 tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
6391 tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);
6392
6393	/* Enhanced statistics exist only for Hercules (Xframe II) */
6394 if (sp->device_type == XFRAME_II_DEVICE) {
6395 tmp_stats[i++] =
6396 le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
6397 tmp_stats[i++] =
6398 le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
6399 tmp_stats[i++] =
6400 le64_to_cpu(stats->rmac_ttl_8192_max_frms);
6401 tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
6402 tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
6403 tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
6404 tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
6405 tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
6406 tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
6407 tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
6408 tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
6409 tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
6410 tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
6411 tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
6412 tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
6413 tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
6414 }
6415
6416 tmp_stats[i++] = 0;
6417 tmp_stats[i++] = swstats->single_ecc_errs;
6418 tmp_stats[i++] = swstats->double_ecc_errs;
6419 tmp_stats[i++] = swstats->parity_err_cnt;
6420 tmp_stats[i++] = swstats->serious_err_cnt;
6421 tmp_stats[i++] = swstats->soft_reset_cnt;
6422 tmp_stats[i++] = swstats->fifo_full_cnt;
6423 for (k = 0; k < MAX_RX_RINGS; k++)
6424 tmp_stats[i++] = swstats->ring_full_cnt[k];
6425 tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
6426 tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
6427 tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
6428 tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
6429 tmp_stats[i++] = xstats->alarm_laser_output_power_high;
6430 tmp_stats[i++] = xstats->alarm_laser_output_power_low;
6431 tmp_stats[i++] = xstats->warn_transceiver_temp_high;
6432 tmp_stats[i++] = xstats->warn_transceiver_temp_low;
6433 tmp_stats[i++] = xstats->warn_laser_bias_current_high;
6434 tmp_stats[i++] = xstats->warn_laser_bias_current_low;
6435 tmp_stats[i++] = xstats->warn_laser_output_power_high;
6436 tmp_stats[i++] = xstats->warn_laser_output_power_low;
6437 tmp_stats[i++] = swstats->clubbed_frms_cnt;
6438 tmp_stats[i++] = swstats->sending_both;
6439 tmp_stats[i++] = swstats->outof_sequence_pkts;
6440 tmp_stats[i++] = swstats->flush_max_pkts;
6441 if (swstats->num_aggregations) {
6442 u64 tmp = swstats->sum_avg_pkts_aggregated;
6443 int count = 0;
6444
6445		/* Since 64-bit divide does not work on all platforms,
6446		 * do repeated subtraction.
6447		 */
6448 while (tmp >= swstats->num_aggregations) {
6449 tmp -= swstats->num_aggregations;
6450 count++;
6451 }
6452 tmp_stats[i++] = count;
6453 } else
6454 tmp_stats[i++] = 0;
6455 tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
6456 tmp_stats[i++] = swstats->pci_map_fail_cnt;
6457 tmp_stats[i++] = swstats->watchdog_timer_cnt;
6458 tmp_stats[i++] = swstats->mem_allocated;
6459 tmp_stats[i++] = swstats->mem_freed;
6460 tmp_stats[i++] = swstats->link_up_cnt;
6461 tmp_stats[i++] = swstats->link_down_cnt;
6462 tmp_stats[i++] = swstats->link_up_time;
6463 tmp_stats[i++] = swstats->link_down_time;
6464
6465 tmp_stats[i++] = swstats->tx_buf_abort_cnt;
6466 tmp_stats[i++] = swstats->tx_desc_abort_cnt;
6467 tmp_stats[i++] = swstats->tx_parity_err_cnt;
6468 tmp_stats[i++] = swstats->tx_link_loss_cnt;
6469 tmp_stats[i++] = swstats->tx_list_proc_err_cnt;
6470
6471 tmp_stats[i++] = swstats->rx_parity_err_cnt;
6472 tmp_stats[i++] = swstats->rx_abort_cnt;
6473 tmp_stats[i++] = swstats->rx_parity_abort_cnt;
6474 tmp_stats[i++] = swstats->rx_rda_fail_cnt;
6475 tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
6476 tmp_stats[i++] = swstats->rx_fcs_err_cnt;
6477 tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
6478 tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
6479 tmp_stats[i++] = swstats->rx_unkn_err_cnt;
6480 tmp_stats[i++] = swstats->tda_err_cnt;
6481 tmp_stats[i++] = swstats->pfc_err_cnt;
6482 tmp_stats[i++] = swstats->pcc_err_cnt;
6483 tmp_stats[i++] = swstats->tti_err_cnt;
6484 tmp_stats[i++] = swstats->tpa_err_cnt;
6485 tmp_stats[i++] = swstats->sm_err_cnt;
6486 tmp_stats[i++] = swstats->lso_err_cnt;
6487 tmp_stats[i++] = swstats->mac_tmac_err_cnt;
6488 tmp_stats[i++] = swstats->mac_rmac_err_cnt;
6489 tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
6490 tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
6491 tmp_stats[i++] = swstats->rc_err_cnt;
6492 tmp_stats[i++] = swstats->prc_pcix_err_cnt;
6493 tmp_stats[i++] = swstats->rpa_err_cnt;
6494 tmp_stats[i++] = swstats->rda_err_cnt;
6495 tmp_stats[i++] = swstats->rti_err_cnt;
6496 tmp_stats[i++] = swstats->mc_err_cnt;
6497}
6498
6499static int s2io_ethtool_get_regs_len(struct net_device *dev)
6500{
6501 return XENA_REG_SPACE;
6502}
6503
6504
6505static int s2io_get_eeprom_len(struct net_device *dev)
6506{
6507 return XENA_EEPROM_SPACE;
6508}
6509
6510static int s2io_get_sset_count(struct net_device *dev, int sset)
6511{
6512 struct s2io_nic *sp = netdev_priv(dev);
6513
6514 switch (sset) {
6515 case ETH_SS_TEST:
6516 return S2IO_TEST_LEN;
6517 case ETH_SS_STATS:
6518 switch (sp->device_type) {
6519 case XFRAME_I_DEVICE:
6520 return XFRAME_I_STAT_LEN;
6521 case XFRAME_II_DEVICE:
6522 return XFRAME_II_STAT_LEN;
6523 default:
6524 return 0;
6525 }
6526 default:
6527 return -EOPNOTSUPP;
6528 }
6529}
6530
6531static void s2io_ethtool_get_strings(struct net_device *dev,
6532 u32 stringset, u8 *data)
6533{
6534 int stat_size = 0;
6535 struct s2io_nic *sp = netdev_priv(dev);
6536
6537 switch (stringset) {
6538 case ETH_SS_TEST:
6539 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6540 break;
6541 case ETH_SS_STATS:
6542 stat_size = sizeof(ethtool_xena_stats_keys);
6543		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6544 if (sp->device_type == XFRAME_II_DEVICE) {
6545 memcpy(data + stat_size,
6546			       &ethtool_enhanced_stats_keys,
6547 sizeof(ethtool_enhanced_stats_keys));
6548 stat_size += sizeof(ethtool_enhanced_stats_keys);
6549 }
6550
6551		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6552 sizeof(ethtool_driver_stats_keys));
6553 }
6554}
6555
6556static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6557{
6558 struct s2io_nic *sp = netdev_priv(dev);
6559 netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6560
6561 if (changed && netif_running(dev)) {
6562 int rc;
6563
6564 s2io_stop_all_tx_queue(sp);
6565 s2io_card_down(sp);
6566 dev->features = features;
6567 rc = s2io_card_up(sp);
6568 if (rc)
6569 s2io_reset(sp);
6570 else
6571 s2io_start_all_tx_queue(sp);
6572
6573 return rc ? rc : 1;
6574 }
6575
6576 return 0;
6577}
6578
6579static const struct ethtool_ops netdev_ethtool_ops = {
6580 .get_drvinfo = s2io_ethtool_gdrvinfo,
6581 .get_regs_len = s2io_ethtool_get_regs_len,
6582 .get_regs = s2io_ethtool_gregs,
6583 .get_link = ethtool_op_get_link,
6584 .get_eeprom_len = s2io_get_eeprom_len,
6585 .get_eeprom = s2io_ethtool_geeprom,
6586 .set_eeprom = s2io_ethtool_seeprom,
6587 .get_ringparam = s2io_ethtool_gringparam,
6588 .get_pauseparam = s2io_ethtool_getpause_data,
6589 .set_pauseparam = s2io_ethtool_setpause_data,
6590 .self_test = s2io_ethtool_test,
6591 .get_strings = s2io_ethtool_get_strings,
6592 .set_phys_id = s2io_ethtool_set_led,
6593 .get_ethtool_stats = s2io_get_ethtool_stats,
6594 .get_sset_count = s2io_get_sset_count,
6595 .get_link_ksettings = s2io_ethtool_get_link_ksettings,
6596 .set_link_ksettings = s2io_ethtool_set_link_ksettings,
6597};
6598
6599 /**
6600 * s2io_ioctl - Entry point for the Ioctl.
6601 * @dev : Device pointer.
6602 * @rq : An IOCTL-specific structure that can contain a pointer to a
6603 * proprietary structure used to pass information to the driver.
6604 * @cmd : This is used to distinguish between the different commands
6605 * that can be passed to the IOCTL functions.
6606 * Description:
6607 * Currently there is no special functionality supported in IOCTL,
6608 * hence this function always returns -EOPNOTSUPP.
6609 * Return value: -EOPNOTSUPP.
6610 */
6611static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6612{
6613 return -EOPNOTSUPP;
6614}
6615
6616 /**
6617 * s2io_change_mtu - entry point to change the MTU size for the device.
6618 * @dev : device pointer.
6619 * @new_mtu : the new MTU size for the device.
6620 * Description: A driver entry point to change the MTU size of the
6621 * device. If the interface is running, the card is brought down and
6622 * back up so the new MTU takes effect.
6623 * Return value:
6624 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6625 * file on failure.
6626 */
6627static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6628{
6629 struct s2io_nic *sp = netdev_priv(dev);
6630 int ret = 0;
6631
6632 dev->mtu = new_mtu;
6633 if (netif_running(dev)) {
6634 s2io_stop_all_tx_queue(sp);
6635 s2io_card_down(sp);
6636 ret = s2io_card_up(sp);
6637 if (ret) {
6638 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6639 __func__);
6640 return ret;
6641 }
6642 s2io_wake_all_tx_queue(sp);
6643 } else {
6644 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6645 u64 val64 = new_mtu;
6646
6647 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6648 }
6649
6650 return ret;
6651}
6652
6653 /**
6654 * s2io_set_link - Set the link status.
6655 * @work : work struct containing a pointer to the device private structure.
6656 * Description: Sets the link status for the adapter.
6657 * Scheduled as a work item; runs under rtnl_lock.
6658 */
6659static void s2io_set_link(struct work_struct *work)
6660{
6661 struct s2io_nic *nic = container_of(work, struct s2io_nic,
6662 set_link_task);
6663 struct net_device *dev = nic->dev;
6664 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6665 register u64 val64;
6666 u16 subid;
6667
6668 rtnl_lock();
6669
6670 if (!netif_running(dev))
6671 goto out_unlock;
6672
6673 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6674 /* The card is being reset, no point doing anything */
6675 goto out_unlock;
6676 }
6677
6678 subid = nic->pdev->subsystem_device;
6679 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6680 /*
6681 * Allow a small delay for the NIC's self-initiated
6682 * cleanup to complete.
6683 */
6684 msleep(100);
6685 }
6686
6687 val64 = readq(&bar0->adapter_status);
6688 if (LINK_IS_UP(val64)) {
6689 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6690 if (verify_xena_quiescence(nic)) {
6691 val64 = readq(&bar0->adapter_control);
6692 val64 |= ADAPTER_CNTL_EN;
6693 writeq(val64, &bar0->adapter_control);
6694 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6695 nic->device_type, subid)) {
6696 val64 = readq(&bar0->gpio_control);
6697 val64 |= GPIO_CTRL_GPIO_0;
6698 writeq(val64, &bar0->gpio_control);
6699 val64 = readq(&bar0->gpio_control);
6700 } else {
6701 val64 |= ADAPTER_LED_ON;
6702 writeq(val64, &bar0->adapter_control);
6703 }
6704 nic->device_enabled_once = true;
6705 } else {
6706 DBG_PRINT(ERR_DBG,
6707 "%s: Error: device is not Quiescent\n",
6708 dev->name);
6709 s2io_stop_all_tx_queue(nic);
6710 }
6711 }
6712 val64 = readq(&bar0->adapter_control);
6713 val64 |= ADAPTER_LED_ON;
6714 writeq(val64, &bar0->adapter_control);
6715 s2io_link(nic, LINK_UP);
6716 } else {
6717 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6718 subid)) {
6719 val64 = readq(&bar0->gpio_control);
6720 val64 &= ~GPIO_CTRL_GPIO_0;
6721 writeq(val64, &bar0->gpio_control);
6722 val64 = readq(&bar0->gpio_control);
6723 }
6724
6725 val64 = readq(&bar0->adapter_control);
6726 val64 = val64 & (~ADAPTER_LED_ON);
6727 writeq(val64, &bar0->adapter_control);
6728 s2io_link(nic, LINK_DOWN);
6729 }
6730 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6731
6732out_unlock:
6733 rtnl_unlock();
6734}
6735
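/*
 * set_rxd_buffer_pointer - attach an skb and DMA mappings to an RxD.
 * If the descriptor already has an skb (*skb != NULL), the previously
 * mapped addresses passed in via temp0/temp1/temp2 are written back.
 * Otherwise a new skb is allocated and mapped, and its bus addresses
 * are saved through the temp pointers for reuse on later descriptors.
 * Returns 0 on success, -ENOMEM on allocation or DMA mapping failure.
 */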
6736static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6737 struct buffAdd *ba,
6738 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6739 u64 *temp2, int size)
6740{
6741 struct net_device *dev = sp->dev;
6742 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6743
6744 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6745 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6746
6747 if (*skb) {
6748 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6749 /*
6750 * As the Rx frame is not going to be processed further,
6751 * reuse the address already mapped for this RxD's
6752 * buffer pointer.
6753 */
6754 rxdp1->Buffer0_ptr = *temp0;
6755 } else {
6756 *skb = netdev_alloc_skb(dev, size);
6757 if (!(*skb)) {
6758 DBG_PRINT(INFO_DBG,
6759 "%s: Out of memory to allocate %s\n",
6760 dev->name, "1 buf mode SKBs");
6761 stats->mem_alloc_fail_cnt++;
6762 return -ENOMEM;
6763 }
6764 stats->mem_allocated += (*skb)->truesize;
6765 /*
6766 * Store the mapped address in a temp variable so it can
6767 * be reused for the next RxD whose Host_Control is NULL.
6768 */
6769 rxdp1->Buffer0_ptr = *temp0 =
6770 dma_map_single(&sp->pdev->dev, (*skb)->data,
6771 size - NET_IP_ALIGN,
6772 DMA_FROM_DEVICE);
6773 if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
6774 goto memalloc_failed;
6775 rxdp->Host_Control = (unsigned long) (*skb);
6776 }
6777 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6778 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6779
6780 if (*skb) {
6781 rxdp3->Buffer2_ptr = *temp2;
6782 rxdp3->Buffer0_ptr = *temp0;
6783 rxdp3->Buffer1_ptr = *temp1;
6784 } else {
6785 *skb = netdev_alloc_skb(dev, size);
6786 if (!(*skb)) {
6787 DBG_PRINT(INFO_DBG,
6788 "%s: Out of memory to allocate %s\n",
6789 dev->name,
6790 "2 buf mode SKBs");
6791 stats->mem_alloc_fail_cnt++;
6792 return -ENOMEM;
6793 }
6794 stats->mem_allocated += (*skb)->truesize;
6795 rxdp3->Buffer2_ptr = *temp2 =
6796 dma_map_single(&sp->pdev->dev, (*skb)->data,
6797 dev->mtu + 4, DMA_FROM_DEVICE);
6798 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
6799 goto memalloc_failed;
6800 rxdp3->Buffer0_ptr = *temp0 =
6801 dma_map_single(&sp->pdev->dev, ba->ba_0,
6802 BUF0_LEN, DMA_FROM_DEVICE);
6803 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
6804 dma_unmap_single(&sp->pdev->dev,
6805 (dma_addr_t)rxdp3->Buffer2_ptr,
6806 dev->mtu + 4,
6807 DMA_FROM_DEVICE);
6808 goto memalloc_failed;
6809 }
6810 rxdp->Host_Control = (unsigned long) (*skb);
6811
6812 /* Buffer-1 is a small dummy buffer (size 1); not used for data */
6813 rxdp3->Buffer1_ptr = *temp1 =
6814 dma_map_single(&sp->pdev->dev, ba->ba_1,
6815 BUF1_LEN, DMA_FROM_DEVICE);
6816 if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
6817 dma_unmap_single(&sp->pdev->dev,
6818 (dma_addr_t)rxdp3->Buffer0_ptr,
6819 BUF0_LEN, DMA_FROM_DEVICE);
6820 dma_unmap_single(&sp->pdev->dev,
6821 (dma_addr_t)rxdp3->Buffer2_ptr,
6822 dev->mtu + 4,
6823 DMA_FROM_DEVICE);
6824 goto memalloc_failed;
6825 }
6826 }
6827 }
6828 return 0;
6829
6830memalloc_failed:
6831 stats->pci_map_fail_cnt++;
6832 stats->mem_freed += (*skb)->truesize;
6833 dev_kfree_skb(*skb);
6834 return -ENOMEM;
6835}
6836
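/*
 * set_rxd_buffer_size - program the per-buffer sizes in the RxD's
 * Control_2 field according to the ring mode (1-buffer or 2-buffer).
 */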
6837static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6838 int size)
6839{
6840 struct net_device *dev = sp->dev;
6841 if (sp->rxd_mode == RXD_MODE_1) {
6842 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6843 } else if (sp->rxd_mode == RXD_MODE_3B) {
6844 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6845 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6846 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6847 }
6848}
6849
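/*
 * rxd_owner_bit_reset - hand every RxD in every Rx ring back to the
 * hardware. Used while waiting for the device to quiesce during card
 * down; the buffers are replenished but never processed, purely to
 * keep the Rx rings from bumping.
 */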
6850static int rxd_owner_bit_reset(struct s2io_nic *sp)
6851{
6852 int i, j, k, blk_cnt = 0, size;
6853 struct config_param *config = &sp->config;
6854 struct mac_info *mac_control = &sp->mac_control;
6855 struct net_device *dev = sp->dev;
6856 struct RxD_t *rxdp = NULL;
6857 struct sk_buff *skb = NULL;
6858 struct buffAdd *ba = NULL;
6859 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6860
6861 /* Calculate the size based on the ring mode */
6862 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6863 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6864 if (sp->rxd_mode == RXD_MODE_1)
6865 size += NET_IP_ALIGN;
6866 else if (sp->rxd_mode == RXD_MODE_3B)
6867 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6868
6869 for (i = 0; i < config->rx_ring_num; i++) {
6870 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6871 struct ring_info *ring = &mac_control->rings[i];
6872
6873 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6874
6875 for (j = 0; j < blk_cnt; j++) {
6876 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6877 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6878 if (sp->rxd_mode == RXD_MODE_3B)
6879 ba = &ring->ba[j][k];
6880 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6881 &temp0_64,
6882 &temp1_64,
6883 &temp2_64,
6884 size) == -ENOMEM) {
6885 return 0;
6886 }
6887
6888 set_rxd_buffer_size(sp, rxdp, size);
6889 dma_wmb();
6890
6891 rxdp->Control_1 |= RXD_OWN_XENA;
6892 }
6893 }
6894 }
6895 return 0;
6896
6897}
6898
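/*
 * s2io_add_isr - register the driver's interrupt handlers.
 * Enables MSI-X when configured, requesting one vector per ring plus an
 * alarm vector; on any failure it falls back to a shared INTA handler.
 * Returns 0 on success and -1 if no ISR could be registered.
 */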
6899static int s2io_add_isr(struct s2io_nic *sp)
6900{
6901 int ret = 0;
6902 struct net_device *dev = sp->dev;
6903 int err = 0;
6904
6905 if (sp->config.intr_type == MSI_X)
6906 ret = s2io_enable_msi_x(sp);
6907 if (ret) {
6908 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6909 sp->config.intr_type = INTA;
6910 }
6911
6912 /*
6913 * Store the values of the MSI-X table in
6914 * the struct s2io_nic structure.
6915 */
6916 store_xmsi_data(sp);
6917
6918 /* After proper initialization of H/W, register the ISR(s) */
6919 if (sp->config.intr_type == MSI_X) {
6920 int i, msix_rx_cnt = 0;
6921
6922 for (i = 0; i < sp->num_entries; i++) {
6923 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6924 if (sp->s2io_entries[i].type ==
6925 MSIX_RING_TYPE) {
6926 snprintf(sp->desc[i],
6927 sizeof(sp->desc[i]),
6928 "%s:MSI-X-%d-RX",
6929 dev->name, i);
6930 err = request_irq(sp->entries[i].vector,
6931 s2io_msix_ring_handle,
6932 0,
6933 sp->desc[i],
6934 sp->s2io_entries[i].arg);
6935 } else if (sp->s2io_entries[i].type ==
6936 MSIX_ALARM_TYPE) {
6937 snprintf(sp->desc[i],
6938 sizeof(sp->desc[i]),
6939 "%s:MSI-X-%d-TX",
6940 dev->name, i);
6941 err = request_irq(sp->entries[i].vector,
6942 s2io_msix_fifo_handle,
6943 0,
6944 sp->desc[i],
6945 sp->s2io_entries[i].arg);
6946
6947 }
6948
6949 if (!(sp->msix_info[i].addr &&
6950 sp->msix_info[i].data)) {
6951 DBG_PRINT(ERR_DBG,
6952 "%s @Addr:0x%llx Data:0x%llx\n",
6953 sp->desc[i],
6954 (unsigned long long)
6955 sp->msix_info[i].addr,
6956 (unsigned long long)
6957 ntohl(sp->msix_info[i].data));
6958 } else
6959 msix_rx_cnt++;
6960 if (err) {
6961 remove_msix_isr(sp);
6962
6963 DBG_PRINT(ERR_DBG,
6964 "%s:MSI-X-%d registration "
6965 "failed\n", dev->name, i);
6966
6967 DBG_PRINT(ERR_DBG,
6968 "%s: Defaulting to INTA\n",
6969 dev->name);
6970 sp->config.intr_type = INTA;
6971 break;
6972 }
6973 sp->s2io_entries[i].in_use =
6974 MSIX_REGISTERED_SUCCESS;
6975 }
6976 }
6977 if (!err) {
6978 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
6979 DBG_PRINT(INFO_DBG,
6980 "MSI-X-TX entries enabled through alarm vector\n");
6981 }
6982 }
6983 if (sp->config.intr_type == INTA) {
6984 err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
6985 sp->name, dev);
6986 if (err) {
6987 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6988 dev->name);
6989 return -1;
6990 }
6991 }
6992 return 0;
6993}
6994
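/* s2io_rem_isr - free whichever ISR (MSI-X or INTA) was registered */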
6995static void s2io_rem_isr(struct s2io_nic *sp)
6996{
6997 if (sp->config.intr_type == MSI_X)
6998 remove_msix_isr(sp);
6999 else
7000 remove_inta_isr(sp);
7001}
7002
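/*
 * do_s2io_card_down - bring the adapter down.
 * Stops the alarm timer, NAPI and NIC traffic, unregisters the ISR,
 * waits for the hardware to quiesce and frees the Tx/Rx buffers.
 * @do_io is 0 when called from the PCI error handler, in which case
 * all register I/O (stop_nic/s2io_reset) is skipped.
 */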
7003static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7004{
7005 int cnt = 0;
7006 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7007 register u64 val64 = 0;
7008 struct config_param *config;
7009 config = &sp->config;
7010
7011 if (!is_s2io_card_up(sp))
7012 return;
7013
7014 del_timer_sync(&sp->alarm_timer);
7015
7016 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7017 msleep(50);
7018 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7019
7020 /* Disable napi */
7021 if (sp->config.napi) {
7022 int off = 0;
7023 if (config->intr_type == MSI_X) {
7024 for (; off < sp->config.rx_ring_num; off++)
7025 napi_disable(&sp->mac_control.rings[off].napi);
7026 } else {
7027 napi_disable(&sp->napi);
7028 }
7029 }
7030
7031 /* disable Tx and Rx traffic on the NIC */
7032 if (do_io)
7033 stop_nic(sp);
7034
7035 s2io_rem_isr(sp);
7036
7037 /* stop the tx queue, indicate link down */
7038 s2io_link(sp, LINK_DOWN);
7039
7040 /* Check if the device is Quiescent and then Reset the NIC */
7041 while (do_io) {
7042 /*
7043 * As per the HW requirement, replenish the receive buffers
7044 * to avoid a ring bump. There is no intention of processing
7045 * the Rx frames at this point: just hand the ownership bit
7046 * of the RxDs in each Rx ring back to the HW, with the
7047 * appropriate buffer size for the ring mode.
7048 */
7049 rxd_owner_bit_reset(sp);
7050
7051 val64 = readq(&bar0->adapter_status);
7052 if (verify_xena_quiescence(sp)) {
7053 if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7054 break;
7055 }
7056
7057 msleep(50);
7058 cnt++;
7059 if (cnt == 10) {
7060 DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7061 "adapter status reads 0x%llx\n",
7062 (unsigned long long)val64);
7063 break;
7064 }
7065 }
7066 if (do_io)
7067 s2io_reset(sp);
7068
7069 /* Free all Tx buffers */
7070 free_tx_buffers(sp);
7071
7072 /* Free all Rx buffers */
7073 free_rx_buffers(sp);
7074
7075 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7076}
7077
7078static void s2io_card_down(struct s2io_nic *sp)
7079{
7080 do_s2io_card_down(sp, 1);
7081}
7082
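/*
 * s2io_card_up - bring the adapter up.
 * Initializes the hardware, fills the Rx rings, enables NAPI, restores
 * the receive mode, starts the NIC, registers the ISR, arms the alarm
 * timer and finally enables interrupts.
 * Returns 0 on success and a negative errno value on failure.
 */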
7083static int s2io_card_up(struct s2io_nic *sp)
7084{
7085 int i, ret = 0;
7086 struct config_param *config;
7087 struct mac_info *mac_control;
7088 struct net_device *dev = sp->dev;
7089 u16 interruptible;
7090
7091 /* Initialize the H/W I/O registers */
7092 ret = init_nic(sp);
7093 if (ret != 0) {
7094 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7095 dev->name);
7096 if (ret != -EIO)
7097 s2io_reset(sp);
7098 return ret;
7099 }
7100
7101 /*
7102 * Initialize the Rx buffers for each configured ring so the
7103 * NIC has descriptors to receive into as soon as it starts.
7104 */
7105 config = &sp->config;
7106 mac_control = &sp->mac_control;
7107
7108 for (i = 0; i < config->rx_ring_num; i++) {
7109 struct ring_info *ring = &mac_control->rings[i];
7110
7111 ring->mtu = dev->mtu;
7112 ring->lro = !!(dev->features & NETIF_F_LRO);
7113 ret = fill_rx_buffers(sp, ring, 1);
7114 if (ret) {
7115 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7116 dev->name);
7117 s2io_reset(sp);
7118 free_rx_buffers(sp);
7119 return -ENOMEM;
7120 }
7121 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7122 ring->rx_bufs_left);
7123 }
7124
7125 /* Initialise napi */
7126 if (config->napi) {
7127 if (config->intr_type == MSI_X) {
7128 for (i = 0; i < sp->config.rx_ring_num; i++)
7129 napi_enable(&sp->mac_control.rings[i].napi);
7130 } else {
7131 napi_enable(&sp->napi);
7132 }
7133 }
7134
7135 /* Reset promiscuous/all-multi state prior to the open */
7136 if (sp->promisc_flg)
7137 sp->promisc_flg = 0;
7138 if (sp->m_cast_flg) {
7139 sp->m_cast_flg = 0;
7140 sp->all_multi_pos = 0;
7141 }
7142
7143 /* Setting its receive mode */
7144 s2io_set_multicast(dev);
7145
7146 if (dev->features & NETIF_F_LRO) {
7147 /* Initialize max aggregatable pkts per session based on MTU */
7148 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7149 /* Honour a smaller user-provided limit, if specified */
7150 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7151 sp->lro_max_aggr_per_sess = lro_max_pkts;
7152 }
7153
7154 /* Enable Rx Traffic and interrupts on the NIC */
7155 if (start_nic(sp)) {
7156 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7157 s2io_reset(sp);
7158 free_rx_buffers(sp);
7159 return -ENODEV;
7160 }
7161
7162 /* Add interrupt service routine */
7163 if (s2io_add_isr(sp) != 0) {
7164 if (sp->config.intr_type == MSI_X)
7165 s2io_rem_isr(sp);
7166 s2io_reset(sp);
7167 free_rx_buffers(sp);
7168 return -ENODEV;
7169 }
7170
7171 timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
7172 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
7173
7174 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7175
7176 /* Enable select interrupts */
7177 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7178 if (sp->config.intr_type != INTA) {
7179 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7180 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7181 } else {
7182 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7183 interruptible |= TX_PIC_INTR;
7184 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7185 }
7186
7187 return 0;
7188}
7189
7190 /**
7191 * s2io_restart_nic - Resets the NIC.
7192 * @work : work struct containing a pointer to the device private
7193 * structure.
7194 * Description:
7195 * This work item is scheduled by s2io_tx_watchdog to reset the NIC:
7196 * it brings the card down and back up outside of interrupt context
7197 * and re-wakes the Tx queues.
7198 * Return value: void.
7199 */
7200static void s2io_restart_nic(struct work_struct *work)
7201{
7202 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7203 struct net_device *dev = sp->dev;
7204
7205 rtnl_lock();
7206
7207 if (!netif_running(dev))
7208 goto out_unlock;
7209
7210 s2io_card_down(sp);
7211 if (s2io_card_up(sp)) {
7212 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7213 }
7214 s2io_wake_all_tx_queue(sp);
7215 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7216out_unlock:
7217 rtnl_unlock();
7218}
7219
7220 /**
7221 * s2io_tx_watchdog - Watchdog for transmit side.
7222 * @dev : Pointer to net device structure.
7223 * @txqueue : Index of the hanging Tx queue.
7224 * Description:
7225 * This function is triggered if the Tx Queue is stopped
7226 * for a pre-defined amount of time while the interface is still up.
7227 * If the interface is jammed in such a situation, the hardware is
7228 * reset by scheduling s2io_restart_nic, and the relevant software
7229 * statistics counters are updated.
7230 * Return value:
7231 * void
7232 */
7233
7234static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
7235{
7236 struct s2io_nic *sp = netdev_priv(dev);
7237 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7238
7239 if (netif_carrier_ok(dev)) {
7240 swstats->watchdog_timer_cnt++;
7241 schedule_work(&sp->rst_timer_task);
7242 swstats->soft_reset_cnt++;
7243 }
7244}
7245
7246 /**
7247 * rx_osm_handler - To perform some receive-side OS related operations.
7248 * @ring_data : The ring from which this RxD was extracted.
7249 * @rxdp : Descriptor of the received frame.
7250 * Description:
7251 * Called by the Rx interrupt service routine to perform some OS-related
7252 * operations on the SKB before passing it to the upper layers. It
7253 * checks the transfer code, sets the checksum status, optionally clubs
7254 * the frame into an LRO session and finally queues the SKB up the
7255 * stack.
7256 * Return value:
7257 * SUCCESS on success; frames with fatal transfer codes are dropped
7258 * and counted.
7259 */
7260static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7261{
7262 struct s2io_nic *sp = ring_data->nic;
7263 struct net_device *dev = ring_data->dev;
7264 struct sk_buff *skb = (struct sk_buff *)
7265 ((unsigned long)rxdp->Host_Control);
7266 int ring_no = ring_data->ring_no;
7267 u16 l3_csum, l4_csum;
7268 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7269 struct lro *lro;
7270 u8 err_mask;
7271 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7272
7273 skb->dev = dev;
7274
7275 if (err) {
7276 /* Check for parity error */
7277 if (err & 0x1)
7278 swstats->parity_err_cnt++;
7279
7280 err_mask = err >> 48;
7281 switch (err_mask) {
7282 case 1:
7283 swstats->rx_parity_err_cnt++;
7284 break;
7285
7286 case 2:
7287 swstats->rx_abort_cnt++;
7288 break;
7289
7290 case 3:
7291 swstats->rx_parity_abort_cnt++;
7292 break;
7293
7294 case 4:
7295 swstats->rx_rda_fail_cnt++;
7296 break;
7297
7298 case 5:
7299 swstats->rx_unkn_prot_cnt++;
7300 break;
7301
7302 case 6:
7303 swstats->rx_fcs_err_cnt++;
7304 break;
7305
7306 case 7:
7307 swstats->rx_buf_size_err_cnt++;
7308 break;
7309
7310 case 8:
7311 swstats->rx_rxd_corrupt_cnt++;
7312 break;
7313
7314 case 15:
7315 swstats->rx_unkn_err_cnt++;
7316 break;
7317 }
7318
7319 /*
7320 * Drop the packet if the transfer code is bad. The exception is
7321 * 0x5, which can be due to an unsupported IPv6 extension header;
7322 * in that case the packet is passed up and, since its checksum
7323 * will be incorrect, the stack will validate it.
7324 */
7325 if (err_mask != 0x5) {
7326 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7327 dev->name, err_mask);
7328 dev->stats.rx_crc_errors++;
7329 swstats->mem_freed
7330 += skb->truesize;
7331 dev_kfree_skb(skb);
7332 ring_data->rx_bufs_left -= 1;
7333 rxdp->Host_Control = 0;
7334 return 0;
7335 }
7336 }
7337
7338 rxdp->Host_Control = 0;
7339 if (sp->rxd_mode == RXD_MODE_1) {
7340 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7341
7342 skb_put(skb, len);
7343 } else if (sp->rxd_mode == RXD_MODE_3B) {
7344 int get_block = ring_data->rx_curr_get_info.block_index;
7345 int get_off = ring_data->rx_curr_get_info.offset;
7346 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7347 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7348 unsigned char *buff = skb_push(skb, buf0_len);
7349
7350 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7351 memcpy(buff, ba->ba_0, buf0_len);
7352 skb_put(skb, buf2_len);
7353 }
7354
7355 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7356 ((!ring_data->lro) ||
7357 (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7358 (dev->features & NETIF_F_RXCSUM)) {
7359 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7360 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7361 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7362 /*
7363 * The NIC verifies whether the checksum of the received
7364 * frame is OK and returns the result as flags in the
7365 * RxD (the L3/L4 checksum bits checked above).
7366 */
7367 skb->ip_summed = CHECKSUM_UNNECESSARY;
7368 if (ring_data->lro) {
7369 u32 tcp_len = 0;
7370 u8 *tcp;
7371 int ret = 0;
7372
7373 ret = s2io_club_tcp_session(ring_data,
7374 skb->data, &tcp,
7375 &tcp_len, &lro,
7376 rxdp, sp);
7377 switch (ret) {
7378 case 3:
7379 lro->parent = skb;
7380 goto aggregate;
7381 case 1:
7382 lro_append_pkt(sp, lro, skb, tcp_len);
7383 goto aggregate;
7384 case 4:
7385 lro_append_pkt(sp, lro, skb, tcp_len);
7386 queue_rx_frame(lro->parent,
7387 lro->vlan_tag);
7388 clear_lro_session(lro);
7389 swstats->flush_max_pkts++;
7390 goto aggregate;
7391 case 2:
7392 lro->parent->data_len = lro->frags_len;
7393 swstats->sending_both++;
7394 queue_rx_frame(lro->parent,
7395 lro->vlan_tag);
7396 clear_lro_session(lro);
7397 goto send_up;
7398 case 0:
7399 case -1:
7400 case 5:
7401 /*
7402 * Frame was not aggregated; fall through and send it up as-is.
7403 */
7404 break;
7405 default:
7406 DBG_PRINT(ERR_DBG,
7407 "%s: Samadhana!!\n",
7408 __func__);
7409 BUG();
7410 }
7411 }
7412 } else {
7413 /*
7414 * Packet with erroneous checksum; let the
7415 * upper layers deal with it.
7416 */
7417 skb_checksum_none_assert(skb);
7418 }
7419 } else
7420 skb_checksum_none_assert(skb);
7421
7422 swstats->mem_freed += skb->truesize;
7423send_up:
7424 skb_record_rx_queue(skb, ring_no);
7425 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7426aggregate:
7427 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7428 return SUCCESS;
7429}
7430
7431 /**
7432 * s2io_link - Stops/starts the Tx queue.
7433 * @sp : private member of the device structure, which is a pointer to
7434 * the s2io_nic structure.
7435 * @link : indicates whether the link is UP or DOWN.
7436 * Description:
7437 * This function stops/starts the Tx queue depending on whether the
7438 * link status of the NIC is down or up, and updates the link state
7439 * timing statistics. It is called whenever a link change interrupt
7440 * comes up.
7441 * Return value:
7442 * void.
7443 */
7444static void s2io_link(struct s2io_nic *sp, int link)
7445{
7446 struct net_device *dev = sp->dev;
7447 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7448
7449 if (link != sp->last_link_state) {
7450 init_tti(sp, link);
7451 if (link == LINK_DOWN) {
7452 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7453 s2io_stop_all_tx_queue(sp);
7454 netif_carrier_off(dev);
7455 if (swstats->link_up_cnt)
7456 swstats->link_up_time =
7457 jiffies - sp->start_time;
7458 swstats->link_down_cnt++;
7459 } else {
7460 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7461 if (swstats->link_down_cnt)
7462 swstats->link_down_time =
7463 jiffies - sp->start_time;
7464 swstats->link_up_cnt++;
7465 netif_carrier_on(dev);
7466 s2io_wake_all_tx_queue(sp);
7467 }
7468 }
7469 sp->last_link_state = link;
7470 sp->start_time = jiffies;
7471}
7472
7473 /**
7474 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7475 * @sp : private member of the device structure, which is a pointer to
7476 * the s2io_nic structure.
7477 * Description:
7478 * This function initializes a few of the PCI and PCI-X configuration
7479 * registers with recommended values.
7480 * Return value:
7481 * void.
7482 */
7483
7484static void s2io_init_pci(struct s2io_nic *sp)
7485{
7486 u16 pci_cmd = 0, pcix_cmd = 0;
7487
7488 /* Enable Data Parity Error Recovery in PCI-X command register. */
7489 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7490 &(pcix_cmd));
7491 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7492 (pcix_cmd | 1));
7493 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7494 &(pcix_cmd));
7495
7496 /* Set the PErr Response bit in PCI command register. */
7497 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7498 pci_write_config_word(sp->pdev, PCI_COMMAND,
7499 (pci_cmd | PCI_COMMAND_PARITY));
7500 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7501}
7502
7503static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7504 u8 *dev_multiq)
7505{
7506 int i;
7507
7508 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7509 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7510 "(%d) not supported\n", tx_fifo_num);
7511
7512 if (tx_fifo_num < 1)
7513 tx_fifo_num = 1;
7514 else
7515 tx_fifo_num = MAX_TX_FIFOS;
7516
7517 DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7518 }
7519
7520 if (multiq)
7521 *dev_multiq = multiq;
7522
7523 if (tx_steering_type && (1 == tx_fifo_num)) {
7524 if (tx_steering_type != TX_DEFAULT_STEERING)
7525 DBG_PRINT(ERR_DBG,
7526 "Tx steering is not supported with "
7527 "one fifo. Disabling Tx steering.\n");
7528 tx_steering_type = NO_STEERING;
7529 }
7530
7531 if ((tx_steering_type < NO_STEERING) ||
7532 (tx_steering_type > TX_DEFAULT_STEERING)) {
7533 DBG_PRINT(ERR_DBG,
7534 "Requested transmit steering not supported\n");
7535 DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7536 tx_steering_type = NO_STEERING;
7537 }
7538
7539 if (rx_ring_num > MAX_RX_RINGS) {
7540 DBG_PRINT(ERR_DBG,
7541 "Requested number of rx rings not supported\n");
7542 DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7543 MAX_RX_RINGS);
7544 rx_ring_num = MAX_RX_RINGS;
7545 }
7546
7547 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7548 DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7549 "Defaulting to INTA\n");
7550 *dev_intr_type = INTA;
7551 }
7552
7553 if ((*dev_intr_type == MSI_X) &&
7554 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7555 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7556 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7557 "Defaulting to INTA\n");
7558 *dev_intr_type = INTA;
7559 }
7560
7561 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7562 DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7563 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7564 rx_ring_mode = 1;
7565 }
7566
7567 for (i = 0; i < MAX_RX_RINGS; i++)
7568 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7569 DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7570 "supported\nDefaulting to %d\n",
7571 MAX_RX_BLOCKS_PER_RING);
7572 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7573 }
7574
7575 return SUCCESS;
7576}
7577
7578 /**
7579 * rts_ds_steer - Receive traffic steering based on IPv4/IPv6 TOS or
7580 * Traffic Class.
7581 * @nic : device private variable.
7582 * @ds_codepoint : DS codepoint to steer on (0..63).
7583 * @ring : the ring index to which matching traffic is steered.
7584 * Description: Configures receive steering of the given DS codepoint
7585 * to the desired receive ring.
7586 * Return Value: SUCCESS on success, FAILURE otherwise.
7587 */
7588static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7589{
7590 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7591 register u64 val64 = 0;
7592
7593 if (ds_codepoint > 63)
7594 return FAILURE;
7595
7596 val64 = RTS_DS_MEM_DATA(ring);
7597 writeq(val64, &bar0->rts_ds_mem_data);
7598
7599 val64 = RTS_DS_MEM_CTRL_WE |
7600 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7601 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7602
7603 writeq(val64, &bar0->rts_ds_mem_ctrl);
7604
7605 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7606 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7607 S2IO_BIT_RESET);
7608}
7609
7610static const struct net_device_ops s2io_netdev_ops = {
7611 .ndo_open = s2io_open,
7612 .ndo_stop = s2io_close,
7613 .ndo_get_stats = s2io_get_stats,
7614 .ndo_start_xmit = s2io_xmit,
7615 .ndo_validate_addr = eth_validate_addr,
7616 .ndo_set_rx_mode = s2io_set_multicast,
7617 .ndo_do_ioctl = s2io_ioctl,
7618 .ndo_set_mac_address = s2io_set_mac_addr,
7619 .ndo_change_mtu = s2io_change_mtu,
7620 .ndo_set_features = s2io_set_features,
7621 .ndo_tx_timeout = s2io_tx_watchdog,
7622#ifdef CONFIG_NET_POLL_CONTROLLER
7623 .ndo_poll_controller = s2io_netpoll,
7624#endif
7625};
7626
7627 /**
7628 * s2io_init_nic - Initialization of the adapter.
7629 * @pdev : structure containing the PCI related information of the device.
7630 * @pre : list of PCI devices supported by the driver, stored in the
7631 * form of pci_device_id structures.
7632 * Description:
7633 * The function initializes an adapter identified by the pci_device_id
7634 * structure. All OS-related initialization, including memory, the
7635 * device structure and the device private variables, is done here.
7636 * The swapper control register is also initialized to enable reads
7637 * and writes into the I/O registers of the device.
7638 * Return value:
7639 * returns 0 on success and a negative errno value on failure.
7640 */
7641static int
7642s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7643{
7644 struct s2io_nic *sp;
7645 struct net_device *dev;
7646 int i, j, ret;
7647 int dma_flag = false;
7648 u32 mac_up, mac_down;
7649 u64 val64 = 0, tmp64 = 0;
7650 struct XENA_dev_config __iomem *bar0 = NULL;
7651 u16 subid;
7652 struct config_param *config;
7653 struct mac_info *mac_control;
7654 int mode;
7655 u8 dev_intr_type = intr_type;
7656 u8 dev_multiq = 0;
7657
7658 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7659 if (ret)
7660 return ret;
7661
7662 ret = pci_enable_device(pdev);
7663 if (ret) {
7664 DBG_PRINT(ERR_DBG,
7665 "%s: pci_enable_device failed\n", __func__);
7666 return ret;
7667 }
7668
7669 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7670 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7671 dma_flag = true;
7672 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7673 DBG_PRINT(ERR_DBG,
7674 "Unable to obtain 64bit DMA for coherent allocations\n");
7675 pci_disable_device(pdev);
7676 return -ENOMEM;
7677 }
7678 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
7679 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7680 } else {
7681 pci_disable_device(pdev);
7682 return -ENOMEM;
7683 }
7684 ret = pci_request_regions(pdev, s2io_driver_name);
7685 if (ret) {
7686 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7687 __func__, ret);
7688 pci_disable_device(pdev);
7689 return -ENODEV;
7690 }
7691 if (dev_multiq)
7692 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7693 else
7694 dev = alloc_etherdev(sizeof(struct s2io_nic));
7695 if (dev == NULL) {
7696 pci_disable_device(pdev);
7697 pci_release_regions(pdev);
7698 return -ENODEV;
7699 }
7700
7701 pci_set_master(pdev);
7702 pci_set_drvdata(pdev, dev);
7703 SET_NETDEV_DEV(dev, &pdev->dev);
7704
7705
7706 sp = netdev_priv(dev);
7707 sp->dev = dev;
7708 sp->pdev = pdev;
7709 sp->high_dma_flag = dma_flag;
7710 sp->device_enabled_once = false;
7711 if (rx_ring_mode == 1)
7712 sp->rxd_mode = RXD_MODE_1;
7713 if (rx_ring_mode == 2)
7714 sp->rxd_mode = RXD_MODE_3B;
7715
7716 sp->config.intr_type = dev_intr_type;
7717
7718 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7719 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7720 sp->device_type = XFRAME_II_DEVICE;
7721 else
7722 sp->device_type = XFRAME_I_DEVICE;
7723
7724
7725 /* Initialize some PCI/PCI-X fields of the NIC. */
7726 s2io_init_pci(sp);
7727
7728 /*
7729 * Setting the device configuration parameters.
7730 * Most of these parameters can be specified by the user during
7731 * module insertion as they are module-loadable parameters. If
7732 * these parameters are not specified at load time, they are
7733 * initialized with default values.
7734 */
7735 config = &sp->config;
7736 mac_control = &sp->mac_control;
7737
7738 config->napi = napi;
7739 config->tx_steering_type = tx_steering_type;
7740
7741 /* Tx side parameters. */
7742 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7743 config->tx_fifo_num = MAX_TX_FIFOS;
7744 else
7745 config->tx_fifo_num = tx_fifo_num;
7746
7747 /* Initialize the fifos used for tx steering */
7748 if (config->tx_fifo_num < 5) {
7749 if (config->tx_fifo_num == 1)
7750 sp->total_tcp_fifos = 1;
7751 else
7752 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7753 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7754 sp->total_udp_fifos = 1;
7755 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7756 } else {
7757 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7758 FIFO_OTHER_MAX_NUM);
7759 sp->udp_fifo_idx = sp->total_tcp_fifos;
7760 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7761 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7762 }
7763
7764 config->multiq = dev_multiq;
7765 for (i = 0; i < config->tx_fifo_num; i++) {
7766 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7767
7768 tx_cfg->fifo_len = tx_fifo_len[i];
7769 tx_cfg->fifo_priority = i;
7770 }
7771
7772 /* map the hashing selector table to the configured fifos */
7773 for (i = 0; i < MAX_TX_FIFOS; i++)
7774 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7775
7776
7777 for (i = 0; i < config->tx_fifo_num; i++)
7778 sp->fifo_selector[i] = fifo_selector[i];
7779
7780
7781 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7782 for (i = 0; i < config->tx_fifo_num; i++) {
7783 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7784
7785 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7786 if (tx_cfg->fifo_len < 65) {
7787 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7788 break;
7789 }
7790 }
7791
7792 config->max_txds = MAX_SKB_FRAGS + 2;
7793
7794 /* Rx side parameters. */
7795 config->rx_ring_num = rx_ring_num;
7796 for (i = 0; i < config->rx_ring_num; i++) {
7797 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7798 struct ring_info *ring = &mac_control->rings[i];
7799
7800 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7801 rx_cfg->ring_priority = i;
7802 ring->rx_bufs_left = 0;
7803 ring->rxd_mode = sp->rxd_mode;
7804 ring->rxd_count = rxd_count[sp->rxd_mode];
7805 ring->pdev = sp->pdev;
7806 ring->dev = sp->dev;
7807 }
7808
7809 for (i = 0; i < rx_ring_num; i++) {
7810 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7811
7812 rx_cfg->ring_org = RING_ORG_BUFF1;
7813 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7814 }
7815
7816 /* Setting Mac Control parameters */
7817 mac_control->rmac_pause_time = rmac_pause_time;
7818 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7819 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7820
7821
7822 /* initialize the shared memory used by the NIC and the host */
7823 if (init_shared_mem(sp)) {
7824 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7825 ret = -ENOMEM;
7826 goto mem_alloc_failed;
7827 }
7828
7829 sp->bar0 = pci_ioremap_bar(pdev, 0);
7830 if (!sp->bar0) {
7831 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7832 dev->name);
7833 ret = -ENOMEM;
7834 goto bar0_remap_failed;
7835 }
7836
7837 sp->bar1 = pci_ioremap_bar(pdev, 2);
7838 if (!sp->bar1) {
7839 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7840 dev->name);
7841 ret = -ENOMEM;
7842 goto bar1_remap_failed;
7843 }
7844
7845 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7846 for (j = 0; j < MAX_TX_FIFOS; j++) {
7847 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7848 }
7849
7850 /* Driver entry points */
7851 dev->netdev_ops = &s2io_netdev_ops;
7852 dev->ethtool_ops = &netdev_ethtool_ops;
7853 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7854 NETIF_F_TSO | NETIF_F_TSO6 |
7855 NETIF_F_RXCSUM | NETIF_F_LRO;
7856 dev->features |= dev->hw_features |
7857 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7858 if (sp->high_dma_flag)
7859 dev->features |= NETIF_F_HIGHDMA;
7860 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7861 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7862 INIT_WORK(&sp->set_link_task, s2io_set_link);
7863
7864 pci_save_state(sp->pdev);
7865
7866 /* Setting swapper control on the NIC, for proper reset operation */
7867 if (s2io_set_swapper(sp)) {
7868 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7869 dev->name);
7870 ret = -EAGAIN;
7871 goto set_swap_failed;
7872 }
7873
7874 /* Verify if the Herc works in the slot it is placed into */
7875 if (sp->device_type & XFRAME_II_DEVICE) {
7876 mode = s2io_verify_pci_mode(sp);
7877 if (mode < 0) {
7878 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7879 __func__);
7880 ret = -EBADSLT;
7881 goto set_swap_failed;
7882 }
7883 }
7884
7885 if (sp->config.intr_type == MSI_X) {
7886 sp->num_entries = config->rx_ring_num + 1;
7887 ret = s2io_enable_msi_x(sp);
7888
7889 if (!ret) {
7890 ret = s2io_test_msi(sp);
7891 /* rollback MSI-X; it will be re-enabled during s2io_add_isr() */
7892 remove_msix_isr(sp);
7893 }
7894 if (ret) {
7895
7896 DBG_PRINT(ERR_DBG,
7897 "MSI-X requested but failed to enable\n");
7898 sp->config.intr_type = INTA;
7899 }
7900 }
7901
7902 if (config->intr_type == MSI_X) {
7903 for (i = 0; i < config->rx_ring_num ; i++) {
7904 struct ring_info *ring = &mac_control->rings[i];
7905
7906 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7907 }
7908 } else {
7909 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7910 }
7911
7912 /* Not needed for Herc */
7913 if (sp->device_type & XFRAME_I_DEVICE) {
7914 /*
7915 * Fix for the all-"FFs" MAC address problems observed
7916 * on Alpha platforms.
7917 */
7918 fix_mac_address(sp);
7919 s2io_reset(sp);
7920 }
7921
7922 /*
7923 * MAC address initialization.
7924 * For now only one MAC address will be read and used.
7925 */
7926 bar0 = sp->bar0;
7927 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7928 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7929 writeq(val64, &bar0->rmac_addr_cmd_mem);
7930 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7931 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7932 S2IO_BIT_RESET);
7933 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7934 mac_down = (u32)tmp64;
7935 mac_up = (u32) (tmp64 >> 32);
7936
7937 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7938 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7939 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7940 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7941 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7942 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7943
7944 /* Set the factory defined MAC address initially */
7945 dev->addr_len = ETH_ALEN;
7946 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7947
7948 /* initialize number of multicast & unicast MAC entries variables */
7949 if (sp->device_type == XFRAME_I_DEVICE) {
7950 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7951 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7952 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7953 } else if (sp->device_type == XFRAME_II_DEVICE) {
7954 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7955 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7956 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7957 }
7958
7959 /* MTU range: MIN_MTU - S2IO_JUMBO_SIZE */
7960 dev->min_mtu = MIN_MTU;
7961 dev->max_mtu = S2IO_JUMBO_SIZE;
7962
7963 /* store MAC addresses from CAM to the s2io_nic structure */
7964 do_s2io_store_unicast_mc(sp);
7965
7966 /* Configure MSIX vector for number of rings configured plus one */
7967 if ((sp->device_type == XFRAME_II_DEVICE) &&
7968 (config->intr_type == MSI_X))
7969 sp->num_entries = config->rx_ring_num + 1;
7970
7971 /* Store the values of the MSIX table in the s2io_nic structure */
7972 store_xmsi_data(sp);
7973
7974 s2io_reset(sp);
7975
7976 /*
7977 * Initialize the link state flags
7978 * and the card state parameter.
7979 */
7980 sp->state = 0;
7981
7982 /* Initialize spinlocks */
7983 for (i = 0; i < sp->config.tx_fifo_num; i++) {
7984 struct fifo_info *fifo = &mac_control->fifos[i];
7985
7986 spin_lock_init(&fifo->tx_lock);
7987 }
7988
7989 /*
7990 * SXE-002: Configure link and activity LED to init state
7991 * on driver load.
7992 */
7993 subid = sp->pdev->subsystem_device;
7994 if ((subid & 0xFF) >= 0x07) {
7995 val64 = readq(&bar0->gpio_control);
7996 val64 |= 0x0000800000000000ULL;
7997 writeq(val64, &bar0->gpio_control);
7998 val64 = 0x0411040400000000ULL;
7999 writeq(val64, (void __iomem *)bar0 + 0x2700);
8000 val64 = readq(&bar0->gpio_control);
8001 }
8002
8003 sp->rx_csum = 1;
8004
8005 if (register_netdev(dev)) {
8006 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8007 ret = -ENODEV;
8008 goto register_failed;
8009 }
8010 s2io_vpd_read(sp);
8011 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8012 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8013 sp->product_name, pdev->revision);
8014 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8015 s2io_driver_version);
8016 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8017 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8018 if (sp->device_type & XFRAME_II_DEVICE) {
8019 mode = s2io_print_pci_mode(sp);
8020 if (mode < 0) {
8021 ret = -EBADSLT;
8022 unregister_netdev(dev);
8023 goto set_swap_failed;
8024 }
8025 }
8026 switch (sp->rxd_mode) {
8027 case RXD_MODE_1:
8028 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8029 dev->name);
8030 break;
8031 case RXD_MODE_3B:
8032 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8033 dev->name);
8034 break;
8035 }
8036
8037 switch (sp->config.napi) {
8038 case 0:
8039 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8040 break;
8041 case 1:
8042 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8043 break;
8044 }
8045
8046 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8047 sp->config.tx_fifo_num);
8048
8049 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8050 sp->config.rx_ring_num);
8051
8052 switch (sp->config.intr_type) {
8053 case INTA:
8054 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8055 break;
8056 case MSI_X:
8057 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8058 break;
8059 }
8060 if (sp->config.multiq) {
8061 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8062 struct fifo_info *fifo = &mac_control->fifos[i];
8063
8064 fifo->multiq = config->multiq;
8065 }
8066 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8067 dev->name);
8068 } else
8069 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8070 dev->name);
8071
8072 switch (sp->config.tx_steering_type) {
8073 case NO_STEERING:
8074 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8075 dev->name);
8076 break;
8077 case TX_PRIORITY_STEERING:
8078 DBG_PRINT(ERR_DBG,
8079 "%s: Priority steering enabled for transmit\n",
8080 dev->name);
8081 break;
8082 case TX_DEFAULT_STEERING:
8083 DBG_PRINT(ERR_DBG,
8084 "%s: Default steering enabled for transmit\n",
8085 dev->name);
8086 }
8087
8088 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8089 dev->name);
8090
8091 snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8092 sp->product_name);
8093
8094 if (vlan_tag_strip)
8095 sp->vlan_strip_flag = 1;
8096 else
8097 sp->vlan_strip_flag = 0;
8098
8099 /*
8100 * Make the link state off at this point; when the link change
8101 * interrupt comes, the state will be automatically changed to
8102 * the right state.
8103 */
8104 netif_carrier_off(dev);
8105
8106 return 0;
8107
8108register_failed:
8109set_swap_failed:
8110 iounmap(sp->bar1);
8111bar1_remap_failed:
8112 iounmap(sp->bar0);
8113bar0_remap_failed:
8114mem_alloc_failed:
8115 free_shared_mem(sp);
8116 pci_disable_device(pdev);
8117 pci_release_regions(pdev);
8118 free_netdev(dev);
8119
8120 return ret;
8121}
8122
8123 /**
8124 * s2io_rem_nic - Free the PCI device.
8125 * @pdev : structure containing the PCI related information of the device.
8126 * Description: This function is called by the PCI subsystem to release a
8127 * PCI device and free up all resources held by the device. This could
8128 * be in response to a hot-plug event or when the driver is about to be
8129 * removed from memory.
8130 */
8131
8132static void s2io_rem_nic(struct pci_dev *pdev)
8133{
8134 struct net_device *dev = pci_get_drvdata(pdev);
8135 struct s2io_nic *sp;
8136
8137 if (dev == NULL) {
8138 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8139 return;
8140 }
8141
8142 sp = netdev_priv(dev);
8143
8144 cancel_work_sync(&sp->rst_timer_task);
8145 cancel_work_sync(&sp->set_link_task);
8146
8147 unregister_netdev(dev);
8148
8149 free_shared_mem(sp);
8150 iounmap(sp->bar0);
8151 iounmap(sp->bar1);
8152 pci_release_regions(pdev);
8153 free_netdev(dev);
8154 pci_disable_device(pdev);
8155}
8156
8157module_pci_driver(s2io_driver);
8158
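/*
 * check_L2_lro_capable - verify that a frame is an LRO candidate at L2.
 * Only TCP frames with a DIX-type Ethernet header qualify; on success
 * the IP and TCP header pointers are returned through @ip and @tcp.
 * Returns 0 if the frame is a candidate, -1 otherwise.
 */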
8159static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8160 struct tcphdr **tcp, struct RxD_t *rxdp,
8161 struct s2io_nic *sp)
8162{
8163 int ip_off;
8164 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8165
8166 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8167 DBG_PRINT(INIT_DBG,
8168 "%s: Non-TCP frames not supported for LRO\n",
8169 __func__);
8170 return -1;
8171 }
8172
8173 /* Checking for DIX type or DIX type with VLAN */
8174 if ((l2_type == 0) || (l2_type == 4)) {
8175 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8176 /*
8177 * If VLAN stripping is disabled and the frame is a VLAN-tagged
8178 * frame, shift the IP offset by the VLAN header size.
8179 */
8180 if ((!sp->vlan_strip_flag) &&
8181 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8182 ip_off += HEADER_VLAN_SIZE;
8183 } else {
8184 /* LLC, SNAP etc are considered non-mergeable */
8185 return -1;
8186 }
8187
8188 *ip = (struct iphdr *)(buffer + ip_off);
8189 ip_len = (u8)((*ip)->ihl);
8190 ip_len <<= 2;
8191 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8192
8193 return 0;
8194}
8195
8196static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8197 struct tcphdr *tcp)
8198{
8199 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8200 if ((lro->iph->saddr != ip->saddr) ||
8201 (lro->iph->daddr != ip->daddr) ||
8202 (lro->tcph->source != tcp->source) ||
8203 (lro->tcph->dest != tcp->dest))
8204 return -1;
8205 return 0;
8206}
8207
8208static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8209{
8210 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8211}
8212
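/*
 * initiate_new_session - seed a free LRO session object with the
 * headers, sequence numbers and (if present) TCP timestamp values of
 * the first frame of a new flow.
 */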
8213static void initiate_new_session(struct lro *lro, u8 *l2h,
8214 struct iphdr *ip, struct tcphdr *tcp,
8215 u32 tcp_pyld_len, u16 vlan_tag)
8216{
8217 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8218 lro->l2h = l2h;
8219 lro->iph = ip;
8220 lro->tcph = tcp;
8221 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8222 lro->tcp_ack = tcp->ack_seq;
8223 lro->sg_num = 1;
8224 lro->total_len = ntohs(ip->tot_len);
8225 lro->frags_len = 0;
8226 lro->vlan_tag = vlan_tag;
8227
8228 /* Check if we saw a TCP timestamp.
8229 * Other consistency checks have already been done.
8230 */
8231 if (tcp->doff == 8) {
8232 __be32 *ptr;
8233 ptr = (__be32 *)(tcp+1);
8234 lro->saw_ts = 1;
8235 lro->cur_tsval = ntohl(*(ptr+1));
8236 lro->cur_tsecr = *(ptr+2);
8237 }
8238 lro->in_use = 1;
8239}
8240
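/*
 * update_L3L4_header - rewrite the IP/TCP headers of the aggregated
 * super-frame (total length and checksum, ack, window, tsecr) just
 * before it is flushed up the stack, and update the aggregation stats.
 */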
8241static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8242{
8243 struct iphdr *ip = lro->iph;
8244 struct tcphdr *tcp = lro->tcph;
8245 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8246
8247 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8248
8249 /* Update L3 header */
8250 csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8251 ip->tot_len = htons(lro->total_len);
8252
8253 /* Update L4 header */
8254 tcp->ack_seq = lro->tcp_ack;
8255 tcp->window = lro->window;
8256
8257 /* Update tsecr field if this session has timestamps enabled */
8258 if (lro->saw_ts) {
8259 __be32 *ptr = (__be32 *)(tcp + 1);
8260 *(ptr+2) = lro->cur_tsecr;
8261 }
8262
8263 /* Update counters required for calculation of
8264 * average no. of packets aggregated.
8265 */
8266 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8267 swstats->num_aggregations++;
8268}
8269
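/*
 * aggregate_new_rx - fold a newly received in-order segment into an
 * existing LRO session: extend the lengths and the expected sequence
 * number, and refresh the ack/window/timestamp state.
 */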
8270static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8271 struct tcphdr *tcp, u32 l4_pyld)
8272{
8273 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8274 lro->total_len += l4_pyld;
8275 lro->frags_len += l4_pyld;
8276 lro->tcp_next_seq += l4_pyld;
8277 lro->sg_num++;
8278
8279 /* Update ack seq no. and window (from this pkt) in LRO object */
8280 lro->tcp_ack = tcp->ack_seq;
8281 lro->window = tcp->window;
8282
8283 if (lro->saw_ts) {
8284 __be32 *ptr;
8285
8286 ptr = (__be32 *)(tcp+1);
8287 lro->cur_tsval = ntohl(*(ptr+1));
8288 lro->cur_tsecr = *(ptr + 2);
8289 }
8290}
8291
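/*
 * verify_l3_l4_lro_capable - check the IP/TCP headers for LRO
 * eligibility: non-empty payload, no IP options, no ECN CE mark, only
 * the ACK flag set, and at most one (monotonic) TCP timestamp option.
 * Returns 0 if the frame may be aggregated, -1 otherwise.
 */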
8292static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8293 struct tcphdr *tcp, u32 tcp_pyld_len)
8294{
8295 u8 *ptr;
8296
8297 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8298
8299 if (!tcp_pyld_len) {
8300 /* Runt frame or a pure ack */
8301 return -1;
8302 }
8303
8304 if (ip->ihl != 5)
8305 return -1;
8306
8307 /* If we see CE codepoint in IP header, packet is not mergeable */
8308 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8309 return -1;
8310
8311 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8312 if (tcp->urg || tcp->psh || tcp->rst ||
8313 tcp->syn || tcp->fin ||
8314 tcp->ece || tcp->cwr || !tcp->ack) {
8315 /*
8316 * Currently we recognize only the ACK control word; any
8317 * other control field being set results in flushing the
8318 * LRO session.
8319 */
8320 return -1;
8321 }
8322
8323 /*
8324 * Allow only one TCP timestamp option. Don't aggregate if
8325 * any other options are detected.
8326 */
8327 if (tcp->doff != 5 && tcp->doff != 8)
8328 return -1;
8329
8330 if (tcp->doff == 8) {
8331 ptr = (u8 *)(tcp + 1);
8332 while (*ptr == TCPOPT_NOP)
8333 ptr++;
8334 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8335 return -1;
8336
8337 /* Ensure the timestamp value increases monotonically */
8338 if (l_lro)
8339 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8340 return -1;
8341
8342 /* timestamp echo reply should be non-zero */
8343 if (*((__be32 *)(ptr+6)) == 0)
8344 return -1;
8345 }
8346
8347 return 0;
8348}
8349
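/*
 * s2io_club_tcp_session - LRO decision function for a received frame.
 * Matches the frame against the per-ring session table and returns a
 * code that rx_osm_handler acts upon:
 *   0  all sessions in use, frame not aggregated
 *   1  frame appended to an existing session
 *   2  out-of-sequence or non-mergeable frame; flush the session
 *   3  new session initiated with this frame
 *   4  frame appended and max aggregation reached; flush the session
 *   5  first packet of the flow is not L3/L4 aggregatable; send as-is
 *  -1  not a TCP frame in a DIX header (from check_L2_lro_capable)
 */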
8350static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8351 u8 **tcp, u32 *tcp_len, struct lro **lro,
8352 struct RxD_t *rxdp, struct s2io_nic *sp)
8353{
8354 struct iphdr *ip;
8355 struct tcphdr *tcph;
8356 int ret = 0, i;
8357 u16 vlan_tag = 0;
8358 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8359
8360 ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8361 rxdp, sp);
8362 if (ret)
8363 return ret;
8364
8365 DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8366
8367 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8368 tcph = (struct tcphdr *)*tcp;
8369 *tcp_len = get_l4_pyld_length(ip, tcph);
8370 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8371 struct lro *l_lro = &ring_data->lro0_n[i];
8372 if (l_lro->in_use) {
8373 if (check_for_socket_match(l_lro, ip, tcph))
8374 continue;
8375
8376 *lro = l_lro;
8377
8378 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8379 DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
8380 "expected 0x%x, actual 0x%x\n",
8381 __func__,
8382 (*lro)->tcp_next_seq,
8383 ntohl(tcph->seq));
8384
8385 swstats->outof_sequence_pkts++;
8386 ret = 2;
8387 break;
8388 }
8389
8390 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8391 *tcp_len))
8392 ret = 1;
8393 else
8394 ret = 2;
8395 break;
8396 }
8397 }
8398
8399 if (ret == 0) {
8400 /*
8401 * Before searching for an available LRO session, check
8402 * whether the packet is L3/L4 aggregatable at all. If not,
8403 * don't create a new session; just send this packet up.
8404 */
8405 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8406 return 5;
8407
8408 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8409 struct lro *l_lro = &ring_data->lro0_n[i];
8410 if (!(l_lro->in_use)) {
8411 *lro = l_lro;
8412 ret = 3;
8413 break;
8414 }
8415 }
8416 }
8417
8418 if (ret == 0) {
8419 DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8420 __func__);
8421 *lro = NULL;
8422 return ret;
8423 }
8424
8425 switch (ret) {
8426 case 3:
8427 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8428 vlan_tag);
8429 break;
8430 case 2:
8431 update_L3L4_header(sp, *lro);
8432 break;
8433 case 1:
8434 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8435 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8436 update_L3L4_header(sp, *lro);
8437 ret = 4;
8438 }
8439 break;
8440 default:
8441 DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8442 break;
8443 }
8444
8445 return ret;
8446}
8447
8448static void clear_lro_session(struct lro *lro)
8449{
8450 /* Wipe the session object so the slot can be reused */
8451
8452 memset(lro, 0, sizeof(struct lro));
8453}
8454
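/*
 * queue_rx_frame - hand a completed skb to the network stack (via NAPI
 * or netif_rx), restoring the VLAN tag if the NIC stripped it.
 */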
8455static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8456{
8457 struct net_device *dev = skb->dev;
8458 struct s2io_nic *sp = netdev_priv(dev);
8459
8460 skb->protocol = eth_type_trans(skb, dev);
8461 if (vlan_tag && sp->vlan_strip_flag)
8462 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8463 if (sp->config.napi)
8464 netif_receive_skb(skb);
8465 else
8466 netif_rx(skb);
8467}
8468
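/*
 * lro_append_pkt - chain an skb carrying @tcp_len bytes of TCP payload
 * onto the frag_list of the session's parent skb and update the
 * parent's length and truesize accounting.
 */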
8469static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8470 struct sk_buff *skb, u32 tcp_len)
8471{
8472 struct sk_buff *first = lro->parent;
8473 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8474
8475 first->len += tcp_len;
8476 first->data_len = lro->frags_len;
8477 skb_pull(skb, (skb->len - tcp_len));
8478 if (skb_shinfo(first)->frag_list)
8479 lro->last_frag->next = skb;
8480 else
8481 skb_shinfo(first)->frag_list = skb;
8482 first->truesize += skb->truesize;
8483 lro->last_frag = skb;
8484 swstats->clubbed_frms_cnt++;
8485}
8486
8487 /**
8488 * s2io_io_error_detected - called when a PCI error is detected.
8489 * @pdev : Pointer to PCI device.
8490 * @state : The current PCI connection state.
8491 *
8492 * This function is called after a PCI bus error affecting
8493 * this device has been detected.
8494 */
8495static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8496 pci_channel_state_t state)
8497{
8498 struct net_device *netdev = pci_get_drvdata(pdev);
8499 struct s2io_nic *sp = netdev_priv(netdev);
8500
8501 netif_device_detach(netdev);
8502
8503 if (state == pci_channel_io_perm_failure)
8504 return PCI_ERS_RESULT_DISCONNECT;
8505
8506 if (netif_running(netdev)) {
8507 /* Bring down the card, while avoiding PCI I/O */
8508 do_s2io_card_down(sp, 0);
8509 }
8510 pci_disable_device(pdev);
8511
8512 return PCI_ERS_RESULT_NEED_RESET;
8513}
8514
8515 /**
8516 * s2io_io_slot_reset - called after the PCI bus has been reset.
8517 * @pdev : Pointer to PCI device.
8518 *
8519 * Restart the card from scratch, as if from a cold boot.
8520 * At this point the card has experienced a hard reset,
8521 * followed by fixups by the BIOS, and has its config space
8522 * set up identically to what it was at cold boot.
8523 */
8524static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8525{
8526 struct net_device *netdev = pci_get_drvdata(pdev);
8527 struct s2io_nic *sp = netdev_priv(netdev);
8528
8529 if (pci_enable_device(pdev)) {
8530 pr_err("Cannot re-enable PCI device after reset.\n");
8531 return PCI_ERS_RESULT_DISCONNECT;
8532 }
8533
8534 pci_set_master(pdev);
8535 s2io_reset(sp);
8536
8537 return PCI_ERS_RESULT_RECOVERED;
8538}
8539
8540 /**
8541 * s2io_io_resume - called when traffic can start flowing again.
8542 * @pdev : Pointer to PCI device.
8543 *
8544 * This callback is called when the error recovery driver tells
8545 * us that it's OK to resume normal operation.
8546 */
8547static void s2io_io_resume(struct pci_dev *pdev)
8548{
8549 struct net_device *netdev = pci_get_drvdata(pdev);
8550 struct s2io_nic *sp = netdev_priv(netdev);
8551
8552 if (netif_running(netdev)) {
8553 if (s2io_card_up(sp)) {
8554 pr_err("Can't bring device back up after reset.\n");
8555 return;
8556 }
8557
8558 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8559 s2io_card_down(sp);
8560 pr_err("Can't restore mac addr after reset.\n");
8561 return;
8562 }
8563 }
8564
8565 netif_device_attach(netdev);
8566 netif_tx_wake_all_queues(netdev);
8567}
8568