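/*
 * s2io.c: Linux PCI/PCI-X driver for the Neterion (S2IO) Xframe I and
 * Xframe II 10GbE server network adapters.
 */
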
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mdio.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/tcp.h>
#include <net/checksum.h>

#include <asm/div64.h>
#include <asm/irq.h>

#include "s2io.h"
#include "s2io-regs.h"

#define DRV_VERSION "2.0.26.28"

static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;

static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};

static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}

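/*
 * Cards with subsystem IDs in the ranges 0x600B-0x600D and
 * 0x640B-0x640D have faulty link state indicators.  The macro below
 * identifies such cards (Xframe I devices only).
 */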
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(dev_type == XFRAME_I_DEVICE) ?					\
	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))

static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}

/* Ethtool related variables and macros. */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};

static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};

static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};

#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)

#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	init_timer(&timer);				\
	timer.function = handle;			\
	timer.data = (unsigned long)arg;		\
	mod_timer(&timer, (jiffies + exp))		\

/* copy mac addr to def_mac_addr array */
static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
{
	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
}

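/*
 * Constants to be programmed into the Xena's registers to configure
 * the XAUI.
 */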
#define END_SIGN 0x0
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

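/*
 * Constants for fixing the MAC address reading problem seen mostly on
 * Alpha machines.
 */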
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

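/* Module loadable parameters. */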
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);

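/* Large receive offload feature */
/*
 * Max pkts to be aggregated by LRO at one time, and the count beyond
 * which aggregated packets are indicated to the stack.
 */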
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0};

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);

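/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */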
static const struct pci_device_id s2io_tbl[] = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

static const struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = s2io_rem_nic,
	.err_handler = &s2io_err_handler,
};

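/*
 * A simplifier macro used by both init_shared_mem() and
 * free_shared_mem(): the number of memory pages needed to hold 'len'
 * TxD lists when 'per_each' lists fit in one page.
 */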
#define TXD_MEM_PAGE_CNT(len, per_each) ((len + per_each - 1) / per_each)

static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
	}
	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
{
	if (!sp->config.multiq)
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_STOP;

	netif_tx_stop_all_queues(sp->dev);
}

static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_start_all_queues(sp->dev);
}

static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
	if (!sp->config.multiq) {
		int i;

		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
	}
	netif_tx_wake_all_queues(sp->dev);
}

static inline void s2io_wake_tx_queue(
	struct fifo_info *fifo, int cnt, u8 multiq)
{
	if (multiq) {
		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(fifo->dev)) {
			fifo->queue_state = FIFO_QUEUE_START;
			netif_wake_queue(fifo->dev);
		}
	}
}

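/**
 * init_shared_mem - Allocation and initialization of memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver: the TxD lists for each FIFO, the
 * RxD blocks for each ring (plus the buffer-address bookkeeping in
 * 2-buffer mode) and the statistics block.
 */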
static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;

	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent failed for TxDL\n");
				return -ENOMEM;
			}
			/*
			 * If we got a zero DMA address (can happen on
			 * certain platforms like PPC), reallocate.
			 * Store the virtual address of the page we don't
			 * want, to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);

			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size, GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		pci_alloc_consistent(nic->pdev, size,
				     &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		  dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}

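/**
 * free_shared_mem - Free the allocated memory
 * @nic: Device private variable.
 * Description: This function frees all the memory allocated by
 * init_shared_mem() and returns it to the kernel.
 */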
static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	swstats = &stats->sw_stat;

	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			if (!fli->list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    fli->list_virt_addr,
					    fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/*
		 * If we got a zero DMA address during allocation,
		 * free the page now.
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses for 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}

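/**
 * s2io_verify_pci_mode - Read the PCI/PCI-X mode the card is in.
 * Returns the mode on success, -1 if the mode is unknown.
 */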
static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;
	return mode;
}

#define NEC_VENID	0x1033
#define NEC_DEVID	0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	for_each_pci_dev(tdev) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);
				return 1;
			}
		}
	}
	return 0;
}

static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};

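/**
 * s2io_print_pci_mode - Print the bus width and PCI/PCI-X mode in use.
 */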
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;
	const char *pcimode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		pcimode = "33MHz PCI bus";
		break;
	case PCI_MODE_PCI_66:
		pcimode = "66MHz PCI bus";
		break;
	case PCI_MODE_PCIX_M1_66:
		pcimode = "66MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_100:
		pcimode = "100MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_133:
		pcimode = "133MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M2_66:
		pcimode = "133MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_100:
		pcimode = "200MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_133:
		pcimode = "266MHz PCIX(M2) bus";
		break;
	default:
		pcimode = "unsupported bus!";
		mode = -1;
	}

	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);

	return mode;
}

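/**
 * init_tti - Initialization of the transmit traffic interrupt scheme
 * @nic: device private variable
 * @link: link status (LINK_UP/LINK_DOWN) used to decide whether
 * continuous transmit interrupts are enabled on FIFO 0
 * Description: Programs the TTI data and command memories for every
 * configured Tx FIFO.
 * Return Value: SUCCESS on success, FAILURE otherwise.
 */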
static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are
		 * enabled by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			int count = (nic->config.bus_speed * 125) / 2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}

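/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block of the
 * hardware from its reset value.
 * Return Value: SUCCESS on success, a negative errno on failure
 * (e.g. incorrect endian settings).
 */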
static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* to set the swapper controle on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS,
	 * so do an extra sw_reset cycle on Xframe II first.
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/*
	 * Ensure that it's safe to access registers by checking that the
	 * RIC_RUNNING bit is reset. Check is valid only for Xframe II.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/* Configure the XAUI interface */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1);
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
		TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL |
		TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization: program the ring priorities */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority.
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/*
	 * Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
	for (i = 0; i < config->rx_ring_num; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/*
	 * Set the frame length for the configured rings
	 * desired by the user.
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/*
		 * If rts_frm_len[i] == 0 then it is assumed that the user
		 * has not specified frame-length steering for that ring;
		 * only user-provided values are programmed, otherwise the
		 * MTU-based default set above is left in place.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG,
				  "%s: rts_ds_steer failed on codepoint %d\n",
				  dev->name, i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* Initialize TTI */
	if (SUCCESS != init_tti(nic, nic->last_link_state))
		return -ENODEV;

	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate Apprx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125) / 4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		RTI_DATA1_MEM_RX_URNG_B(0x10) |
		RTI_DATA1_MEM_RX_URNG_C(0x30) |
		RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2);
	if (nic->config.intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
			  RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
			  RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE |
			RTI_CMD_MEM_STROBE_NEW_CMD |
			RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete; if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (true) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;

			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
					  dev->name);
				return -ENODEV;
			}
			time++;
			msleep(50);
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by Xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any Queue exceeds the ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256, a
	 * pause frame is generated.
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q0q3)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q4q7)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read splits has
	 * exceeded the limit pointed by shared_splits.
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(s2BIT(13) | s2BIT(14) | s2BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}

#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2

static int s2io_link_fault_indication(struct s2io_nic *nic)
{
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	else
		return MAC_RMAC_ERR_TIMER;
}

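/**
 * do_s2io_write_bits - update alarm bits in alarm register
 * @value: alarm bits
 * @flag: interrupt status
 * @addr: address value
 * Description: update alarm bits in alarm register
 * Return Value: NONE.
 */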
static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
{
	u64 temp64;

	temp64 = readq(addr);

	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)value);
	else
		temp64 |= ((u64)value);
	writeq(temp64, addr);
}

static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR | RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}

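/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable
 * @mask: A mask indicating which Intr block must be modified
 * @flag: A flag indicating whether to enable or disable the Intrs
 * Description: This function will either disable or enable the
 * interrupts depending on the flag argument. The mask argument can be
 * used to enable/disable any Intr block.
 * Return Value: NONE.
 */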
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	intr_mask = nic->general_int_mask;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/* Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO, otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts;
			 * writing 0 enables all 64 TX interrupt levels.
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	nic->general_int_mask = readq(&bar0->general_int_mask);
}

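/**
 * verify_pcc_quiescent - Checks for PCC quiescent state
 * Return: 1 if PCC is quiescent, 0 if not.
 */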
static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
{
	int ret = 0, herc;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);

	herc = (sp->device_type == XFRAME_II_DEVICE);

	if (flag == false) {
		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	} else {
		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	}

	return ret;
}

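/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * Description: Checks the readiness/quiescence bit of every hardware
 * block in the adapter status register.
 * Return: 1 if Xena is quiescent, 0 if not.
 */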
2116static int verify_xena_quiescence(struct s2io_nic *sp)
2117{
2118 int mode;
2119 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2120 u64 val64 = readq(&bar0->adapter_status);
2121 mode = s2io_verify_pci_mode(sp);
2122
2123 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2124 DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2125 return 0;
2126 }
2127 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2128 DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2129 return 0;
2130 }
2131 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2132 DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2133 return 0;
2134 }
2135 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2136 DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2137 return 0;
2138 }
2139 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2140 DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2141 return 0;
2142 }
2143 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2144 DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2145 return 0;
2146 }
2147 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2148 DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2149 return 0;
2150 }
2151 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2152 DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2153 return 0;
2154 }
2155
2156
2157
2158
2159
2160
2161 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2162 sp->device_type == XFRAME_II_DEVICE &&
2163 mode != PCI_MODE_PCI_33) {
2164 DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2165 return 0;
2166 }
2167 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2168 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2169 DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2170 return 0;
2171 }
2172 return 1;
2173}
2174
/**
 * fix_mac_address - Fix for MAC address problems on some platforms
 * @sp : pointer to the device specific structure.
 * Description:
 * Writes a fixed sequence (fix_mac[]) to the GPIO control register to
 * work around MAC address read problems.
 * Return value: none
 */
2183static void fix_mac_address(struct s2io_nic *sp)
2184{
2185 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2186 int i = 0;
2187
2188 while (fix_mac[i] != END_SIGN) {
2189 writeq(fix_mac[i++], &bar0->gpio_control);
2190 udelay(10);
2191 (void) readq(&bar0->gpio_control);
2192 }
2193}
2194
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all registers are configured from their reset states and
 * shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC
 * is literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and FAILURE on failure.
 */
2208static int start_nic(struct s2io_nic *nic)
2209{
2210 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2211 struct net_device *dev = nic->dev;
2212 register u64 val64 = 0;
2213 u16 subid, i;
2214 struct config_param *config = &nic->config;
2215 struct mac_info *mac_control = &nic->mac_control;
2216
 /* PRC Initialization and configuration */
2218 for (i = 0; i < config->rx_ring_num; i++) {
2219 struct ring_info *ring = &mac_control->rings[i];
2220
2221 writeq((u64)ring->rx_blocks[0].block_dma_addr,
2222 &bar0->prc_rxd0_n[i]);
2223
2224 val64 = readq(&bar0->prc_ctrl_n[i]);
2225 if (nic->rxd_mode == RXD_MODE_1)
2226 val64 |= PRC_CTRL_RC_ENABLED;
2227 else
2228 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2229 if (nic->device_type == XFRAME_II_DEVICE)
2230 val64 |= PRC_CTRL_GROUP_READS;
2231 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2232 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2233 writeq(val64, &bar0->prc_ctrl_n[i]);
2234 }
2235
2236 if (nic->rxd_mode == RXD_MODE_3B) {
 /* In 2-buffer (3B) mode, ignore L2 errors so such frames still reach the driver. */
2238 val64 = readq(&bar0->rx_pa_cfg);
2239 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2240 writeq(val64, &bar0->rx_pa_cfg);
2241 }
2242
2243 if (vlan_tag_strip == 0) {
2244 val64 = readq(&bar0->rx_pa_cfg);
2245 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2246 writeq(val64, &bar0->rx_pa_cfg);
2247 nic->vlan_strip_flag = 0;
2248 }
2249
 /*
  * Enabling MC-RLDRAM. After enabling the device, we timeout
  * for around 100ms, which is approximately the time required
  * for the device to be ready for operation.
  */
2255 val64 = readq(&bar0->mc_rldram_mrs);
2256 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2257 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2258 val64 = readq(&bar0->mc_rldram_mrs);
2259
2260 msleep(100);
2261
 /* Enabling ECC Protection. */
2263 val64 = readq(&bar0->adapter_control);
2264 val64 &= ~ADAPTER_ECC_EN;
2265 writeq(val64, &bar0->adapter_control);
2266
 /*
  * Verify if the device is ready to be enabled; if so, enable
  * it.
  */
2271 val64 = readq(&bar0->adapter_status);
2272 if (!verify_xena_quiescence(nic)) {
2273 DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2274 "Adapter status reads: 0x%llx\n",
2275 dev->name, (unsigned long long)val64);
2276 return FAILURE;
2277 }
2278
 /*
  * With some switches, link might already be up at this point.
  * Because of this odd behavior, when we enable the laser we may
  * not get link. We need to handle this, and we cannot figure out
  * which switch is misbehaving, so we are forced to make a global
  * change.
  */

 /* Enabling Laser. */
2288 val64 = readq(&bar0->adapter_control);
2289 val64 |= ADAPTER_EOI_TX_ON;
2290 writeq(val64, &bar0->adapter_control);
2291
2292 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
 /*
  * Some switches do not raise an initial link state interrupt,
  * so schedule the link state task directly here.
  */
2297 schedule_work(&nic->set_link_task);
2298 }
 /* SXE-002: Initialize link and activity LED */
2300 subid = nic->pdev->subsystem_device;
2301 if (((subid & 0xFF) >= 0x07) &&
2302 (nic->device_type == XFRAME_I_DEVICE)) {
2303 val64 = readq(&bar0->gpio_control);
2304 val64 |= 0x0000800000000000ULL;
2305 writeq(val64, &bar0->gpio_control);
2306 val64 = 0x0411040400000000ULL;
2307 writeq(val64, (void __iomem *)bar0 + 0x2700);
2308 }
2309
2310 return SUCCESS;
2311}
2312
/*
 * s2io_txdl_getskb - Get the skb from the TxDL, unmap its DMA buffers
 * and return the skb to the caller for freeing.
 */
2315static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2316 struct TxD *txdlp, int get_off)
2317{
2318 struct s2io_nic *nic = fifo_data->nic;
2319 struct sk_buff *skb;
2320 struct TxD *txds;
2321 u16 j, frg_cnt;
2322
2323 txds = txdlp;
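 /* The first TxD may carry an in-band UFO header; unmap and skip it. */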
2324 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2325 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2326 sizeof(u64), PCI_DMA_TODEVICE);
2327 txds++;
2328 }
2329
2330 skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2331 if (!skb) {
2332 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2333 return NULL;
2334 }
2335 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2336 skb_headlen(skb), PCI_DMA_TODEVICE);
2337 frg_cnt = skb_shinfo(skb)->nr_frags;
2338 if (frg_cnt) {
2339 txds++;
2340 for (j = 0; j < frg_cnt; j++, txds++) {
2341 const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2342 if (!txds->Buffer_Pointer)
2343 break;
2344 pci_unmap_page(nic->pdev,
2345 (dma_addr_t)txds->Buffer_Pointer,
2346 skb_frag_size(frag), PCI_DMA_TODEVICE);
2347 }
2348 }
2349 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2350 return skb;
2351}
2352
/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers on every FIFO.
 * Return Value: void
 */
2361static void free_tx_buffers(struct s2io_nic *nic)
2362{
2363 struct net_device *dev = nic->dev;
2364 struct sk_buff *skb;
2365 struct TxD *txdp;
2366 int i, j;
2367 int cnt = 0;
2368 struct config_param *config = &nic->config;
2369 struct mac_info *mac_control = &nic->mac_control;
2370 struct stat_block *stats = mac_control->stats_info;
2371 struct swStat *swstats = &stats->sw_stat;
2372
2373 for (i = 0; i < config->tx_fifo_num; i++) {
2374 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2375 struct fifo_info *fifo = &mac_control->fifos[i];
2376 unsigned long flags;
2377
2378 spin_lock_irqsave(&fifo->tx_lock, flags);
2379 for (j = 0; j < tx_cfg->fifo_len; j++) {
2380 txdp = fifo->list_info[j].list_virt_addr;
2381 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2382 if (skb) {
2383 swstats->mem_freed += skb->truesize;
2384 dev_kfree_skb(skb);
2385 cnt++;
2386 }
2387 }
2388 DBG_PRINT(INTR_DBG,
2389 "%s: forcibly freeing %d skbs on FIFO%d\n",
2390 dev->name, cnt, i);
2391 fifo->tx_curr_get_info.offset = 0;
2392 fifo->tx_curr_put_info.offset = 0;
2393 spin_unlock_irqrestore(&fifo->tx_lock, flags);
2394 }
2395}
2396
/**
 * stop_nic - To stop the nic
 * @nic : device private variable.
 * Description:
 * This function does exactly the opposite of what the start_nic()
 * function does. This function is called to stop the device.
 * Return Value:
 * void.
 */
2407static void stop_nic(struct s2io_nic *nic)
2408{
2409 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2410 register u64 val64 = 0;
2411 u16 interruptible;
2412
 /* Disable all interrupts */
2414 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2415 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2416 interruptible |= TX_PIC_INTR;
2417 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2418
 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2420 val64 = readq(&bar0->adapter_control);
2421 val64 &= ~(ADAPTER_CNTL_EN);
2422 writeq(val64, &bar0->adapter_control);
2423}
2424
/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic : device private variable.
 * @ring : per ring structure.
 * @from_card_up : If true, map buf0 and buf1 freshly to obtain their
 *    DMA addresses for the card; otherwise only sync the already
 *    mapped buffers before handing them back to the card.
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * addresses of these buffers into the RxD buffer pointers, so that the
 * NIC can DMA the received frame into these locations.
 * Two receive modes are supported here:
 * 1. Single buffer mode (RXD_MODE_1) - one buffer per RxD, large
 *    enough for an MTU size frame.
 * 2. Two buffer mode (RXD_MODE_3B) - buffer 0 holds the Ethernet
 *    header and buffer 2 holds the L3/L4 headers plus payload, which
 *    gives 128 byte aligned receive buffers.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */
2447static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2448 int from_card_up)
2449{
2450 struct sk_buff *skb;
2451 struct RxD_t *rxdp;
2452 int off, size, block_no, block_no1;
2453 u32 alloc_tab = 0;
2454 u32 alloc_cnt;
2455 u64 tmp;
2456 struct buffAdd *ba;
2457 struct RxD_t *first_rxdp = NULL;
2458 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2459 struct RxD1 *rxdp1;
2460 struct RxD3 *rxdp3;
2461 struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2462
2463 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
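 /* Descriptors on this ring that still need a fresh buffer. */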
2464
2465 block_no1 = ring->rx_curr_get_info.block_index;
2466 while (alloc_tab < alloc_cnt) {
2467 block_no = ring->rx_curr_put_info.block_index;
2468
2469 off = ring->rx_curr_put_info.offset;
2470
2471 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2472
2473 if ((block_no == block_no1) &&
2474 (off == ring->rx_curr_get_info.offset) &&
2475 (rxdp->Host_Control)) {
2476 DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2477 ring->dev->name);
2478 goto end;
2479 }
2480 if (off && (off == ring->rxd_count)) {
2481 ring->rx_curr_put_info.block_index++;
2482 if (ring->rx_curr_put_info.block_index ==
2483 ring->block_count)
2484 ring->rx_curr_put_info.block_index = 0;
2485 block_no = ring->rx_curr_put_info.block_index;
2486 off = 0;
2487 ring->rx_curr_put_info.offset = off;
2488 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2489 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2490 ring->dev->name, rxdp);
2491
2492 }
2493
2494 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2495 ((ring->rxd_mode == RXD_MODE_3B) &&
2496 (rxdp->Control_2 & s2BIT(0)))) {
2497 ring->rx_curr_put_info.offset = off;
2498 goto end;
2499 }
2500
2501 size = ring->mtu +
2502 HEADER_ETHERNET_II_802_3_SIZE +
2503 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2504 if (ring->rxd_mode == RXD_MODE_1)
2505 size += NET_IP_ALIGN;
2506 else
2507 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2508
 /* allocate skb */
2510 skb = netdev_alloc_skb(nic->dev, size);
2511 if (!skb) {
2512 DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2513 ring->dev->name);
2514 if (first_rxdp) {
2515 dma_wmb();
2516 first_rxdp->Control_1 |= RXD_OWN_XENA;
2517 }
2518 swstats->mem_alloc_fail_cnt++;
2519
 return -ENOMEM;
2521 }
2522 swstats->mem_allocated += skb->truesize;
2523
2524 if (ring->rxd_mode == RXD_MODE_1) {
 /* 1 buffer mode - normal operation mode */
2526 rxdp1 = (struct RxD1 *)rxdp;
2527 memset(rxdp, 0, sizeof(struct RxD1));
2528 skb_reserve(skb, NET_IP_ALIGN);
2529 rxdp1->Buffer0_ptr =
2530 pci_map_single(ring->pdev, skb->data,
2531 size - NET_IP_ALIGN,
2532 PCI_DMA_FROMDEVICE);
2533 if (pci_dma_mapping_error(nic->pdev,
2534 rxdp1->Buffer0_ptr))
2535 goto pci_map_failed;
2536
2537 rxdp->Control_2 =
2538 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2539 rxdp->Host_Control = (unsigned long)skb;
2540 } else if (ring->rxd_mode == RXD_MODE_3B) {
 /*
  * 2 buffer mode -
  * 2 buffer mode provides 128
  * byte aligned receive buffers.
  */
2547 rxdp3 = (struct RxD3 *)rxdp;
 /* save buffer pointers to avoid frequent dma mapping */
2549 Buffer0_ptr = rxdp3->Buffer0_ptr;
2550 Buffer1_ptr = rxdp3->Buffer1_ptr;
2551 memset(rxdp, 0, sizeof(struct RxD3));
2552
2553 rxdp3->Buffer0_ptr = Buffer0_ptr;
2554 rxdp3->Buffer1_ptr = Buffer1_ptr;
2555
2556 ba = &ring->ba[block_no][off];
2557 skb_reserve(skb, BUF0_LEN);
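 /*
  * Round skb->data up to the next (ALIGN_SIZE + 1) byte boundary;
  * ALIGN_SIZE is assumed to be of the form 2^n - 1, so add-then-mask
  * yields an aligned address.
  */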
2558 tmp = (u64)(unsigned long)skb->data;
2559 tmp += ALIGN_SIZE;
2560 tmp &= ~ALIGN_SIZE;
2561 skb->data = (void *) (unsigned long)tmp;
2562 skb_reset_tail_pointer(skb);
2563
2564 if (from_card_up) {
2565 rxdp3->Buffer0_ptr =
2566 pci_map_single(ring->pdev, ba->ba_0,
2567 BUF0_LEN,
2568 PCI_DMA_FROMDEVICE);
2569 if (pci_dma_mapping_error(nic->pdev,
2570 rxdp3->Buffer0_ptr))
2571 goto pci_map_failed;
2572 } else
2573 pci_dma_sync_single_for_device(ring->pdev,
2574 (dma_addr_t)rxdp3->Buffer0_ptr,
2575 BUF0_LEN,
2576 PCI_DMA_FROMDEVICE);
2577
2578 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2579 if (ring->rxd_mode == RXD_MODE_3B) {
 /*
  * Buffer2 will have the L3/L4 header plus
  * the L4 payload.
  */
2586 rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
2587 skb->data,
2588 ring->mtu + 4,
2589 PCI_DMA_FROMDEVICE);
2590
2591 if (pci_dma_mapping_error(nic->pdev,
2592 rxdp3->Buffer2_ptr))
2593 goto pci_map_failed;
2594
2595 if (from_card_up) {
2596 rxdp3->Buffer1_ptr =
2597 pci_map_single(ring->pdev,
2598 ba->ba_1,
2599 BUF1_LEN,
2600 PCI_DMA_FROMDEVICE);
2601
2602 if (pci_dma_mapping_error(nic->pdev,
2603 rxdp3->Buffer1_ptr)) {
2604 pci_unmap_single(ring->pdev,
2605 (dma_addr_t)(unsigned long)
2606 skb->data,
2607 ring->mtu + 4,
2608 PCI_DMA_FROMDEVICE);
2609 goto pci_map_failed;
2610 }
2611 }
2612 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2613 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2614 (ring->mtu + 4);
2615 }
2616 rxdp->Control_2 |= s2BIT(0);
2617 rxdp->Host_Control = (unsigned long) (skb);
2618 }
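 /*
  * Hand ownership of all but the first descriptor of each
  * rxsync_frequency batch to the NIC immediately; the first one is
  * flipped only after a dma_wmb() so the NIC never sees a partially
  * initialized batch.
  */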
2619 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2620 rxdp->Control_1 |= RXD_OWN_XENA;
2621 off++;
2622 if (off == (ring->rxd_count + 1))
2623 off = 0;
2624 ring->rx_curr_put_info.offset = off;
2625
2626 rxdp->Control_2 |= SET_RXD_MARKER;
2627 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2628 if (first_rxdp) {
2629 dma_wmb();
2630 first_rxdp->Control_1 |= RXD_OWN_XENA;
2631 }
2632 first_rxdp = rxdp;
2633 }
2634 ring->rx_bufs_left += 1;
2635 alloc_tab++;
2636 }
2637
2638end:
 /*
  * Transfer ownership of the first descriptor to the adapter just
  * before exiting. Before that, use a memory barrier so that
  * ownership and the other fields are seen by the adapter correctly.
  */
2643 if (first_rxdp) {
2644 dma_wmb();
2645 first_rxdp->Control_1 |= RXD_OWN_XENA;
2646 }
2647
2648 return SUCCESS;
2649
2650pci_map_failed:
2651 swstats->pci_map_fail_cnt++;
2652 swstats->mem_freed += skb->truesize;
2653 dev_kfree_skb_irq(skb);
2654 return -ENOMEM;
2655}
2656
2657static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2658{
2659 struct net_device *dev = sp->dev;
2660 int j;
2661 struct sk_buff *skb;
2662 struct RxD_t *rxdp;
2663 struct RxD1 *rxdp1;
2664 struct RxD3 *rxdp3;
2665 struct mac_info *mac_control = &sp->mac_control;
2666 struct stat_block *stats = mac_control->stats_info;
2667 struct swStat *swstats = &stats->sw_stat;
2668
2669 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2670 rxdp = mac_control->rings[ring_no].
2671 rx_blocks[blk].rxds[j].virt_addr;
2672 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2673 if (!skb)
2674 continue;
2675 if (sp->rxd_mode == RXD_MODE_1) {
2676 rxdp1 = (struct RxD1 *)rxdp;
2677 pci_unmap_single(sp->pdev,
2678 (dma_addr_t)rxdp1->Buffer0_ptr,
2679 dev->mtu +
2680 HEADER_ETHERNET_II_802_3_SIZE +
2681 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2682 PCI_DMA_FROMDEVICE);
2683 memset(rxdp, 0, sizeof(struct RxD1));
2684 } else if (sp->rxd_mode == RXD_MODE_3B) {
2685 rxdp3 = (struct RxD3 *)rxdp;
2686 pci_unmap_single(sp->pdev,
2687 (dma_addr_t)rxdp3->Buffer0_ptr,
2688 BUF0_LEN,
2689 PCI_DMA_FROMDEVICE);
2690 pci_unmap_single(sp->pdev,
2691 (dma_addr_t)rxdp3->Buffer1_ptr,
2692 BUF1_LEN,
2693 PCI_DMA_FROMDEVICE);
2694 pci_unmap_single(sp->pdev,
2695 (dma_addr_t)rxdp3->Buffer2_ptr,
2696 dev->mtu + 4,
2697 PCI_DMA_FROMDEVICE);
2698 memset(rxdp, 0, sizeof(struct RxD3));
2699 }
2700 swstats->mem_freed += skb->truesize;
2701 dev_kfree_skb(skb);
2702 mac_control->rings[ring_no].rx_bufs_left -= 1;
2703 }
2704}
2705
/**
 * free_rx_buffers - Frees all Rx buffers
 * @sp : device private variable.
 * Description:
 * This function will free all Rx buffers allocated by the host.
 * Return Value:
 * NONE.
 */
2715static void free_rx_buffers(struct s2io_nic *sp)
2716{
2717 struct net_device *dev = sp->dev;
2718 int i, blk = 0, buf_cnt = 0;
2719 struct config_param *config = &sp->config;
2720 struct mac_info *mac_control = &sp->mac_control;
2721
2722 for (i = 0; i < config->rx_ring_num; i++) {
2723 struct ring_info *ring = &mac_control->rings[i];
2724
2725 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2726 free_rxd_blk(sp, i, blk);
2727
2728 ring->rx_curr_put_info.block_index = 0;
2729 ring->rx_curr_get_info.block_index = 0;
2730 ring->rx_curr_put_info.offset = 0;
2731 ring->rx_curr_get_info.offset = 0;
2732 ring->rx_bufs_left = 0;
2733 DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2734 dev->name, buf_cnt, i);
2735 }
2736}
2737
2738static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2739{
2740 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2741 DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2742 ring->dev->name);
2743 }
2744 return 0;
2745}
2746
/**
 * s2io_poll_msix - Rx handler for NAPI support (per-ring MSI-X vector)
 * @napi : pointer to the napi structure.
 * @budget : the number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into the picture only if NAPI support has been incorporated.
 * It does the same thing that rx_intr_handler does, but not in an
 * interrupt context; it also processes only a given number of packets.
 * Return value:
 * Number of packets processed.
 */
2760static int s2io_poll_msix(struct napi_struct *napi, int budget)
2761{
2762 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2763 struct net_device *dev = ring->dev;
2764 int pkts_processed = 0;
2765 u8 __iomem *addr = NULL;
2766 u8 val8 = 0;
2767 struct s2io_nic *nic = netdev_priv(dev);
2768 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2769 int budget_org = budget;
2770
2771 if (unlikely(!is_s2io_card_up(nic)))
2772 return 0;
2773
2774 pkts_processed = rx_intr_handler(ring, budget);
2775 s2io_chk_rx_buffers(nic, ring);
2776
2777 if (pkts_processed < budget_org) {
2778 napi_complete_done(napi, pkts_processed);
 /* Re-enable this ring's MSI-X Rx vector */
2780 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2781 addr += 7 - ring->ring_no;
2782 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2783 writeb(val8, addr);
2784 val8 = readb(addr);
2785 }
2786 return pkts_processed;
2787}
2788
2789static int s2io_poll_inta(struct napi_struct *napi, int budget)
2790{
2791 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2792 int pkts_processed = 0;
2793 int ring_pkts_processed, i;
2794 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2795 int budget_org = budget;
2796 struct config_param *config = &nic->config;
2797 struct mac_info *mac_control = &nic->mac_control;
2798
2799 if (unlikely(!is_s2io_card_up(nic)))
2800 return 0;
2801
2802 for (i = 0; i < config->rx_ring_num; i++) {
2803 struct ring_info *ring = &mac_control->rings[i];
2804 ring_pkts_processed = rx_intr_handler(ring, budget);
2805 s2io_chk_rx_buffers(nic, ring);
2806 pkts_processed += ring_pkts_processed;
2807 budget -= ring_pkts_processed;
2808 if (budget <= 0)
2809 break;
2810 }
2811 if (pkts_processed < budget_org) {
2812 napi_complete_done(napi, pkts_processed);
 /* Re-enable Rx interrupts */
2814 writeq(0, &bar0->rx_traffic_mask);
2815 readl(&bar0->rx_traffic_mask);
2816 }
2817 return pkts_processed;
2818}
2819
2820#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * This function will be called by the upper layer to check for events
 * on the interface in situations where interrupts are disabled. It is
 * used for specific in-kernel networking tasks, such as remote consoles
 * and kernel debugging over the network.
 */
2830static void s2io_netpoll(struct net_device *dev)
2831{
2832 struct s2io_nic *nic = netdev_priv(dev);
2833 const int irq = nic->pdev->irq;
2834 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2835 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2836 int i;
2837 struct config_param *config = &nic->config;
2838 struct mac_info *mac_control = &nic->mac_control;
2839
2840 if (pci_channel_offline(nic->pdev))
2841 return;
2842
2843 disable_irq(irq);
2844
2845 writeq(val64, &bar0->rx_traffic_int);
2846 writeq(val64, &bar0->tx_traffic_int);
2847
 /*
  * We need to free up the transmitted skbufs or else netpoll will
  * run out of skbs and will fail, and eventually a netpoll
  * application such as netdump will fail.
  */
2852 for (i = 0; i < config->tx_fifo_num; i++)
2853 tx_intr_handler(&mac_control->fifos[i]);
2854
 /* check for received packets and indicate them up the network stack */
2856 for (i = 0; i < config->rx_ring_num; i++) {
2857 struct ring_info *ring = &mac_control->rings[i];
2858
2859 rx_intr_handler(ring, 0);
2860 }
2861
2862 for (i = 0; i < config->rx_ring_num; i++) {
2863 struct ring_info *ring = &mac_control->rings[i];
2864
2865 if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2866 DBG_PRINT(INFO_DBG,
2867 "%s: Out of memory in Rx Netpoll!!\n",
2868 dev->name);
2869 break;
2870 }
2871 }
2872 enable_irq(irq);
2873}
2874#endif
2875
/**
 * rx_intr_handler - Rx interrupt handler
 * @ring_data : per ring structure.
 * @budget : budget for napi processing.
 * Description:
 * If the interrupt is because of a received frame, or if the receive
 * ring contains fresh as yet un-processed frames, this function is
 * called. It picks out the RxD at which place the last Rx processing
 * had stopped, sends the skb to the OSM's Rx handler and then
 * increments the offset.
 * Return Value:
 * No. of napi packets processed.
 */
2889static int rx_intr_handler(struct ring_info *ring_data, int budget)
2890{
2891 int get_block, put_block;
2892 struct rx_curr_get_info get_info, put_info;
2893 struct RxD_t *rxdp;
2894 struct sk_buff *skb;
2895 int pkt_cnt = 0, napi_pkts = 0;
2896 int i;
2897 struct RxD1 *rxdp1;
2898 struct RxD3 *rxdp3;
2899
2900 if (budget <= 0)
2901 return napi_pkts;
2902
2903 get_info = ring_data->rx_curr_get_info;
2904 get_block = get_info.block_index;
2905 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2906 put_block = put_info.block_index;
2907 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2908
2909 while (RXD_IS_UP2DT(rxdp)) {
 /*
  * If the get offset is right behind the put offset,
  * the ring is full.
  */
2914 if ((get_block == put_block) &&
2915 (get_info.offset + 1) == put_info.offset) {
2916 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2917 ring_data->dev->name);
2918 break;
2919 }
2920 skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2921 if (skb == NULL) {
2922 DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2923 ring_data->dev->name);
2924 return 0;
2925 }
2926 if (ring_data->rxd_mode == RXD_MODE_1) {
2927 rxdp1 = (struct RxD1 *)rxdp;
2928 pci_unmap_single(ring_data->pdev, (dma_addr_t)
2929 rxdp1->Buffer0_ptr,
2930 ring_data->mtu +
2931 HEADER_ETHERNET_II_802_3_SIZE +
2932 HEADER_802_2_SIZE +
2933 HEADER_SNAP_SIZE,
2934 PCI_DMA_FROMDEVICE);
2935 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
2936 rxdp3 = (struct RxD3 *)rxdp;
2937 pci_dma_sync_single_for_cpu(ring_data->pdev,
2938 (dma_addr_t)rxdp3->Buffer0_ptr,
2939 BUF0_LEN,
2940 PCI_DMA_FROMDEVICE);
2941 pci_unmap_single(ring_data->pdev,
2942 (dma_addr_t)rxdp3->Buffer2_ptr,
2943 ring_data->mtu + 4,
2944 PCI_DMA_FROMDEVICE);
2945 }
2946 prefetch(skb->data);
2947 rx_osm_handler(ring_data, rxdp);
2948 get_info.offset++;
2949 ring_data->rx_curr_get_info.offset = get_info.offset;
2950 rxdp = ring_data->rx_blocks[get_block].
2951 rxds[get_info.offset].virt_addr;
2952 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2953 get_info.offset = 0;
2954 ring_data->rx_curr_get_info.offset = get_info.offset;
2955 get_block++;
2956 if (get_block == ring_data->block_count)
2957 get_block = 0;
2958 ring_data->rx_curr_get_info.block_index = get_block;
2959 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2960 }
2961
2962 if (ring_data->nic->config.napi) {
2963 budget--;
2964 napi_pkts++;
2965 if (!budget)
2966 break;
2967 }
2968 pkt_cnt++;
2969 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2970 break;
2971 }
2972 if (ring_data->lro) {
 /* Clear all LRO sessions before exiting */
2974 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2975 struct lro *lro = &ring_data->lro0_n[i];
2976 if (lro->in_use) {
2977 update_L3L4_header(ring_data->nic, lro);
2978 queue_rx_frame(lro->parent, lro->vlan_tag);
2979 clear_lro_session(lro);
2980 }
2981 }
2982 }
2983 return napi_pkts;
2984}
2985
/**
 * tx_intr_handler - Transmit interrupt handler
 * @fifo_data : fifo data structure.
 * Description:
 * If an interrupt was raised to indicate DMA complete of the
 * Tx packet, this function is called. It identifies the last TxD
 * whose buffer was freed and frees all skbs whose data have already
 * been DMA'ed into the NIC's internal memory.
 * Return Value:
 * NONE
 */
2998static void tx_intr_handler(struct fifo_info *fifo_data)
2999{
3000 struct s2io_nic *nic = fifo_data->nic;
3001 struct tx_curr_get_info get_info, put_info;
3002 struct sk_buff *skb = NULL;
3003 struct TxD *txdlp;
3004 int pkt_cnt = 0;
3005 unsigned long flags = 0;
3006 u8 err_mask;
3007 struct stat_block *stats = nic->mac_control.stats_info;
3008 struct swStat *swstats = &stats->sw_stat;
3009
3010 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3011 return;
3012
3013 get_info = fifo_data->tx_curr_get_info;
3014 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3015 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3016 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3017 (get_info.offset != put_info.offset) &&
3018 (txdlp->Host_Control)) {
 /* Check for TxD errors */
3020 if (txdlp->Control_1 & TXD_T_CODE) {
3021 unsigned long long err;
3022 err = txdlp->Control_1 & TXD_T_CODE;
3023 if (err & 0x1) {
3024 swstats->parity_err_cnt++;
3025 }
3026
 /* update t_code statistics */
3028 err_mask = err >> 48;
3029 switch (err_mask) {
3030 case 2:
3031 swstats->tx_buf_abort_cnt++;
3032 break;
3033
3034 case 3:
3035 swstats->tx_desc_abort_cnt++;
3036 break;
3037
3038 case 7:
3039 swstats->tx_parity_err_cnt++;
3040 break;
3041
3042 case 10:
3043 swstats->tx_link_loss_cnt++;
3044 break;
3045
3046 case 15:
3047 swstats->tx_list_proc_err_cnt++;
3048 break;
3049 }
3050 }
3051
3052 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3053 if (skb == NULL) {
3054 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3055 DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3056 __func__);
3057 return;
3058 }
3059 pkt_cnt++;
3060
 /* Updating the statistics block */
3062 swstats->mem_freed += skb->truesize;
3063 dev_kfree_skb_irq(skb);
3064
3065 get_info.offset++;
3066 if (get_info.offset == get_info.fifo_len + 1)
3067 get_info.offset = 0;
3068 txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3069 fifo_data->tx_curr_get_info.offset = get_info.offset;
3070 }
3071
3072 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3073
3074 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3075}
3076
/**
 * s2io_mdio_write - Function to write to MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr     : address value
 * @value    : data value
 * @dev      : pointer to net_device structure
 * Description:
 * This function is used to write values to the MDIO registers.
 * Return Value: NONE
 */
3087static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3088 struct net_device *dev)
3089{
3090 u64 val64;
3091 struct s2io_nic *sp = netdev_priv(dev);
3092 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3093
 /* address transaction */
3095 val64 = MDIO_MMD_INDX_ADDR(addr) |
3096 MDIO_MMD_DEV_ADDR(mmd_type) |
3097 MDIO_MMS_PRT_ADDR(0x0);
3098 writeq(val64, &bar0->mdio_control);
3099 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3100 writeq(val64, &bar0->mdio_control);
3101 udelay(100);
3102
 /* Data transaction */
3104 val64 = MDIO_MMD_INDX_ADDR(addr) |
3105 MDIO_MMD_DEV_ADDR(mmd_type) |
3106 MDIO_MMS_PRT_ADDR(0x0) |
3107 MDIO_MDIO_DATA(value) |
3108 MDIO_OP(MDIO_OP_WRITE_TRANS);
3109 writeq(val64, &bar0->mdio_control);
3110 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3111 writeq(val64, &bar0->mdio_control);
3112 udelay(100);
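 /*
  * Follow up with a read transaction, apparently to flush and verify
  * the write on the MDIO bus.
  */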
3113
3114 val64 = MDIO_MMD_INDX_ADDR(addr) |
3115 MDIO_MMD_DEV_ADDR(mmd_type) |
3116 MDIO_MMS_PRT_ADDR(0x0) |
3117 MDIO_OP(MDIO_OP_READ_TRANS);
3118 writeq(val64, &bar0->mdio_control);
3119 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3120 writeq(val64, &bar0->mdio_control);
3121 udelay(100);
3122}
3123
/**
 * s2io_mdio_read - Function to read from MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr     : address value
 * @dev      : pointer to net_device structure
 * Description:
 * This function is used to read values from the MDIO registers.
 * Return Value: the 16-bit register contents
 */
3133static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3134{
3135 u64 val64 = 0x0;
3136 u64 rval64 = 0x0;
3137 struct s2io_nic *sp = netdev_priv(dev);
3138 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3139
 /* address transaction */
3141 val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3142 | MDIO_MMD_DEV_ADDR(mmd_type)
3143 | MDIO_MMS_PRT_ADDR(0x0));
3144 writeq(val64, &bar0->mdio_control);
3145 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3146 writeq(val64, &bar0->mdio_control);
3147 udelay(100);
3148
 /* Data transaction */
3150 val64 = MDIO_MMD_INDX_ADDR(addr) |
3151 MDIO_MMD_DEV_ADDR(mmd_type) |
3152 MDIO_MMS_PRT_ADDR(0x0) |
3153 MDIO_OP(MDIO_OP_READ_TRANS);
3154 writeq(val64, &bar0->mdio_control);
3155 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3156 writeq(val64, &bar0->mdio_control);
3157 udelay(100);
3158
 /* Read the value from the register */
3160 rval64 = readq(&bar0->mdio_control);
3161 rval64 = rval64 & 0xFFFF0000;
3162 rval64 = rval64 >> 16;
3163 return rval64;
3164}
3165
/**
 * s2io_chk_xpak_counter - Check the status of the xpak counters
 * @counter   : pointer to the counter to be updated
 * @regs_stat : pointer to the packed 2-bit status fields
 * @index     : index of this alarm's 2-bit field
 * @flag      : indicates whether the alarm bit was set
 * @type      : counter type (temperature/bias current/output power)
 * Description:
 * This function checks the status of the xpak counter value.
 * Return Value: NONE
 */
3176static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3177 u16 flag, u16 type)
3178{
3179 u64 mask = 0x3;
3180 u64 val64;
3181 int i;
3182 for (i = 0; i < index; i++)
3183 mask = mask << 0x2;
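 /*
  * Each XPAK alarm keeps a 2-bit count packed into *regs_stat;
  * 'mask' now selects this index's 2-bit field.
  */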
3184
3185 if (flag > 0) {
3186 *counter = *counter + 1;
3187 val64 = *regs_stat & mask;
3188 val64 = val64 >> (index * 0x2);
3189 val64 = val64 + 1;
3190 if (val64 == 3) {
3191 switch (type) {
3192 case 1:
3193 DBG_PRINT(ERR_DBG,
3194 "Take Xframe NIC out of service.\n");
3195 DBG_PRINT(ERR_DBG,
3196"Excessive temperatures may result in premature transceiver failure.\n");
3197 break;
3198 case 2:
3199 DBG_PRINT(ERR_DBG,
3200 "Take Xframe NIC out of service.\n");
3201 DBG_PRINT(ERR_DBG,
3202"Excessive bias currents may indicate imminent laser diode failure.\n");
3203 break;
3204 case 3:
3205 DBG_PRINT(ERR_DBG,
3206 "Take Xframe NIC out of service.\n");
3207 DBG_PRINT(ERR_DBG,
3208"Excessive laser output power may saturate far-end receiver.\n");
3209 break;
3210 default:
3211 DBG_PRINT(ERR_DBG,
3212 "Incorrect XPAK Alarm type\n");
3213 }
3214 val64 = 0x0;
3215 }
3216 val64 = val64 << (index * 0x2);
3217 *regs_stat = (*regs_stat & (~mask)) | (val64);
3218
3219 } else {
3220 *regs_stat = *regs_stat & (~mask);
3221 }
3222}
3223
/**
 * s2io_updt_xpak_counter - Update the xpak counters
 * @dev : pointer to the net_device structure
 * Description:
 * This function updates the status of the xpak counters.
 * Return Value: NONE
 */
3231static void s2io_updt_xpak_counter(struct net_device *dev)
3232{
3233 u16 flag = 0x0;
3234 u16 type = 0x0;
3235 u16 val16 = 0x0;
3236 u64 val64 = 0x0;
3237 u64 addr = 0x0;
3238
3239 struct s2io_nic *sp = netdev_priv(dev);
3240 struct stat_block *stats = sp->mac_control.stats_info;
3241 struct xpakStat *xstats = &stats->xpak_stat;
3242
 /* Check the communication with the MDIO slave */
3244 addr = MDIO_CTRL1;
3245 val64 = 0x0;
3246 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3247 if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3248 DBG_PRINT(ERR_DBG,
3249 "ERR: MDIO slave access failed - Returned %llx\n",
3250 (unsigned long long)val64);
3251 return;
3252 }
3253
 /* Check for the expected value of control reg 1 */
3255 if (val64 != MDIO_CTRL1_SPEED10G) {
3256 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3257 "Returned: %llx- Expected: 0x%x\n",
3258 (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3259 return;
3260 }
3261
 /* Loading the DOM register to the MDIO register */
3263 addr = 0xA100;
3264 s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3265 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3266
 /* Reading the Alarm flags */
3268 addr = 0xA070;
3269 val64 = 0x0;
3270 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3271
3272 flag = CHECKBIT(val64, 0x7);
3273 type = 1;
3274 s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3275 &xstats->xpak_regs_stat,
3276 0x0, flag, type);
3277
3278 if (CHECKBIT(val64, 0x6))
3279 xstats->alarm_transceiver_temp_low++;
3280
3281 flag = CHECKBIT(val64, 0x3);
3282 type = 2;
3283 s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3284 &xstats->xpak_regs_stat,
3285 0x2, flag, type);
3286
3287 if (CHECKBIT(val64, 0x2))
3288 xstats->alarm_laser_bias_current_low++;
3289
3290 flag = CHECKBIT(val64, 0x1);
3291 type = 3;
3292 s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3293 &xstats->xpak_regs_stat,
3294 0x4, flag, type);
3295
3296 if (CHECKBIT(val64, 0x0))
3297 xstats->alarm_laser_output_power_low++;
3298
 /* Reading the Warning flags */
3300 addr = 0xA074;
3301 val64 = 0x0;
3302 val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3303
3304 if (CHECKBIT(val64, 0x7))
3305 xstats->warn_transceiver_temp_high++;
3306
3307 if (CHECKBIT(val64, 0x6))
3308 xstats->warn_transceiver_temp_low++;
3309
3310 if (CHECKBIT(val64, 0x3))
3311 xstats->warn_laser_bias_current_high++;
3312
3313 if (CHECKBIT(val64, 0x2))
3314 xstats->warn_laser_bias_current_low++;
3315
3316 if (CHECKBIT(val64, 0x1))
3317 xstats->warn_laser_output_power_high++;
3318
3319 if (CHECKBIT(val64, 0x0))
3320 xstats->warn_laser_output_power_low++;
3321}
3322
/**
 * wait_for_cmd_complete - waits for a command to complete.
 * @addr : address of the register to poll.
 * @busy_bit : bit that indicates the command is still busy.
 * @bit_state : S2IO_BIT_RESET or S2IO_BIT_SET, the state to wait for.
 * Description: Function that waits for a command written to the RMAC
 * ADDR DATA registers to be completed and returns either success or
 * error depending on whether the command completed or not.
 * Return value:
 * SUCCESS or FAILURE.
 */
3334static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3335 int bit_state)
3336{
3337 int ret = FAILURE, cnt = 0, delay = 1;
3338 u64 val64;
3339
3340 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3341 return FAILURE;
3342
3343 do {
3344 val64 = readq(addr);
3345 if (bit_state == S2IO_BIT_RESET) {
3346 if (!(val64 & busy_bit)) {
3347 ret = SUCCESS;
3348 break;
3349 }
3350 } else {
3351 if (val64 & busy_bit) {
3352 ret = SUCCESS;
3353 break;
3354 }
3355 }
3356
3357 if (in_interrupt())
3358 mdelay(delay);
3359 else
3360 msleep(delay);
3361
3362 if (++cnt >= 10)
3363 delay = 50;
3364 } while (cnt < 20);
3365 return ret;
3366}
3367
/**
 * check_pci_device_id - Checks if the device id is supported
 * @id : device id
 * Description: Checks if the pci device id is supported by the driver.
 * Return value: Actual device id if supported, else PCI_ANY_ID.
 */
3373static u16 check_pci_device_id(u16 id)
3374{
3375 switch (id) {
3376 case PCI_DEVICE_ID_HERC_WIN:
3377 case PCI_DEVICE_ID_HERC_UNI:
3378 return XFRAME_II_DEVICE;
3379 case PCI_DEVICE_ID_S2IO_UNI:
3380 case PCI_DEVICE_ID_S2IO_WIN:
3381 return XFRAME_I_DEVICE;
3382 default:
3383 return PCI_ANY_ID;
3384 }
3385}
3386
/**
 * s2io_reset - Resets the card.
 * @sp : private member of the device structure.
 * Description: Function to reset the card. This function also
 * restores the previously saved PCI configuration space registers,
 * as the card reset also resets the configuration space.
 * Return value:
 * void.
 */
3397static void s2io_reset(struct s2io_nic *sp)
3398{
3399 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3400 u64 val64;
3401 u16 subid, pci_cmd;
3402 int i;
3403 u16 val16;
3404 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3405 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3406 struct stat_block *stats;
3407 struct swStat *swstats;
3408
3409 DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3410 __func__, pci_name(sp->pdev));
3411
 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3413 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3414
3415 val64 = SW_RESET_ALL;
3416 writeq(val64, &bar0->sw_reset);
3417 if (strstr(sp->product_name, "CX4"))
3418 msleep(750);
3419 msleep(250);
3420 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3421
 /* Restore the PCI state saved during initialization. */
3423 pci_restore_state(sp->pdev);
3424 pci_save_state(sp->pdev);
3425 pci_read_config_word(sp->pdev, 0x2, &val16);
3426 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3427 break;
3428 msleep(200);
3429 }
3430
3431 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3432 DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3433
3434 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3435
3436 s2io_init_pci(sp);
3437
 /* Set swapper to enable I/O register access */
3439 s2io_set_swapper(sp);
3440
 /* restore mac_addr entries */
3442 do_s2io_restore_unicast_mc(sp);
3443
 /* Restore the MSIX table entries from local variables */
3445 restore_xmsi_data(sp);
3446
 /* Clear certain PCI/PCI-X fields after reset */
3448 if (sp->device_type == XFRAME_II_DEVICE) {
 /* Clear the "detected parity error" bit */
3450 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3451
 /* Clearing the PCIX ECC status register */
3453 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3454
 /* Clearing the PCI_STATUS error reflected here */
3456 writeq(s2BIT(62), &bar0->txpic_int_reg);
3457 }
3458
 /* Reset device statistics maintained by the OS */
3460 memset(&sp->stats, 0, sizeof(struct net_device_stats));
3461
3462 stats = sp->mac_control.stats_info;
3463 swstats = &stats->sw_stat;
3464
 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3466 up_cnt = swstats->link_up_cnt;
3467 down_cnt = swstats->link_down_cnt;
3468 up_time = swstats->link_up_time;
3469 down_time = swstats->link_down_time;
3470 reset_cnt = swstats->soft_reset_cnt;
3471 mem_alloc_cnt = swstats->mem_allocated;
3472 mem_free_cnt = swstats->mem_freed;
3473 watchdog_cnt = swstats->watchdog_timer_cnt;
3474
3475 memset(stats, 0, sizeof(struct stat_block));
3476
 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3478 swstats->link_up_cnt = up_cnt;
3479 swstats->link_down_cnt = down_cnt;
3480 swstats->link_up_time = up_time;
3481 swstats->link_down_time = down_time;
3482 swstats->soft_reset_cnt = reset_cnt;
3483 swstats->mem_allocated = mem_alloc_cnt;
3484 swstats->mem_freed = mem_free_cnt;
3485 swstats->watchdog_timer_cnt = watchdog_cnt;
3486
 /* SXE-002: Configure link and activity LED to turn it off */
3488 subid = sp->pdev->subsystem_device;
3489 if (((subid & 0xFF) >= 0x07) &&
3490 (sp->device_type == XFRAME_I_DEVICE)) {
3491 val64 = readq(&bar0->gpio_control);
3492 val64 |= 0x0000800000000000ULL;
3493 writeq(val64, &bar0->gpio_control);
3494 val64 = 0x0411040400000000ULL;
3495 writeq(val64, (void __iomem *)bar0 + 0x2700);
3496 }
3497
 /*
  * Clear spurious ECC interrupts that would have occurred on
  * XFRAME II cards after reset.
  */
3502 if (sp->device_type == XFRAME_II_DEVICE) {
3503 val64 = readq(&bar0->pcc_err_reg);
3504 writeq(val64, &bar0->pcc_err_reg);
3505 }
3506
3507 sp->device_enabled_once = false;
3508}
3509
/**
 * s2io_set_swapper - swapper settings are verified and set.
 * @sp : private member of the device structure, a pointer to the
 * s2io_nic structure.
 * Description: Verifies that the endian settings of the NIC are
 * correct, and sets them if not.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */
3520static int s2io_set_swapper(struct s2io_nic *sp)
3521{
3522 struct net_device *dev = sp->dev;
3523 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3524 u64 val64, valt, valr;
3525
 /*
  * Set proper endian settings and verify the same by reading
  * the PIF Feed-back register.
  */
3531 val64 = readq(&bar0->pif_rd_swapper_fb);
3532 if (val64 != 0x0123456789ABCDEFULL) {
3533 int i = 0;
3534 static const u64 value[] = {
3535 0xC30000C3C30000C3ULL,
3536 0x8100008181000081ULL,
3537 0x4200004242000042ULL,
3538 0
3539 };
3540
3541 while (i < 4) {
3542 writeq(value[i], &bar0->swapper_ctrl);
3543 val64 = readq(&bar0->pif_rd_swapper_fb);
3544 if (val64 == 0x0123456789ABCDEFULL)
3545 break;
3546 i++;
3547 }
3548 if (i == 4) {
3549 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3550 "feedback read %llx\n",
3551 dev->name, (unsigned long long)val64);
3552 return FAILURE;
3553 }
3554 valr = value[i];
3555 } else {
3556 valr = readq(&bar0->swapper_ctrl);
3557 }
3558
3559 valt = 0x0123456789ABCDEFULL;
3560 writeq(valt, &bar0->xmsi_address);
3561 val64 = readq(&bar0->xmsi_address);
3562
3563 if (val64 != valt) {
3564 int i = 0;
3565 static const u64 value[] = {
3566 0x00C3C30000C3C300ULL,
3567 0x0081810000818100ULL,
3568 0x0042420000424200ULL,
3569 0
3570 };
3571
3572 while (i < 4) {
3573 writeq((value[i] | valr), &bar0->swapper_ctrl);
3574 writeq(valt, &bar0->xmsi_address);
3575 val64 = readq(&bar0->xmsi_address);
3576 if (val64 == valt)
3577 break;
3578 i++;
3579 }
3580 if (i == 4) {
3581 unsigned long long x = val64;
3582 DBG_PRINT(ERR_DBG,
3583 "Write failed, Xmsi_addr reads:0x%llx\n", x);
3584 return FAILURE;
3585 }
3586 }
3587 val64 = readq(&bar0->swapper_ctrl);
3588 val64 &= 0xFFFF000000000000ULL;
3589
3590#ifdef __BIG_ENDIAN
 /*
  * The device is by default set to a big endian format, so a
  * big endian driver need only set the first-byte swap bits.
  */
3595 val64 |= (SWAPPER_CTRL_TXP_FE |
3596 SWAPPER_CTRL_TXP_SE |
3597 SWAPPER_CTRL_TXD_R_FE |
3598 SWAPPER_CTRL_TXD_W_FE |
3599 SWAPPER_CTRL_TXF_R_FE |
3600 SWAPPER_CTRL_RXD_R_FE |
3601 SWAPPER_CTRL_RXD_W_FE |
3602 SWAPPER_CTRL_RXF_W_FE |
3603 SWAPPER_CTRL_XMSI_FE |
3604 SWAPPER_CTRL_STATS_FE |
3605 SWAPPER_CTRL_STATS_SE);
3606 if (sp->config.intr_type == INTA)
3607 val64 |= SWAPPER_CTRL_XMSI_SE;
3608 writeq(val64, &bar0->swapper_ctrl);
3609#else
 /*
  * Initially we enable all bits to make the registers accessible
  * by the driver, then we selectively enable only those bits
  * that we want to set.
  */
3615 val64 |= (SWAPPER_CTRL_TXP_FE |
3616 SWAPPER_CTRL_TXP_SE |
3617 SWAPPER_CTRL_TXD_R_FE |
3618 SWAPPER_CTRL_TXD_R_SE |
3619 SWAPPER_CTRL_TXD_W_FE |
3620 SWAPPER_CTRL_TXD_W_SE |
3621 SWAPPER_CTRL_TXF_R_FE |
3622 SWAPPER_CTRL_RXD_R_FE |
3623 SWAPPER_CTRL_RXD_R_SE |
3624 SWAPPER_CTRL_RXD_W_FE |
3625 SWAPPER_CTRL_RXD_W_SE |
3626 SWAPPER_CTRL_RXF_W_FE |
3627 SWAPPER_CTRL_XMSI_FE |
3628 SWAPPER_CTRL_STATS_FE |
3629 SWAPPER_CTRL_STATS_SE);
3630 if (sp->config.intr_type == INTA)
3631 val64 |= SWAPPER_CTRL_XMSI_SE;
3632 writeq(val64, &bar0->swapper_ctrl);
3633#endif
3634 val64 = readq(&bar0->swapper_ctrl);
3635
 /*
  * Verifying that the endian settings are accurate by reading
  * a feedback register.
  */
3640 val64 = readq(&bar0->pif_rd_swapper_fb);
3641 if (val64 != 0x0123456789ABCDEFULL) {
3642
3643 DBG_PRINT(ERR_DBG,
3644 "%s: Endian settings are wrong, feedback read %llx\n",
3645 dev->name, (unsigned long long)val64);
3646 return FAILURE;
3647 }
3648
3649 return SUCCESS;
3650}
3651
3652static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3653{
3654 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3655 u64 val64;
3656 int ret = 0, cnt = 0;
3657
3658 do {
3659 val64 = readq(&bar0->xmsi_access);
3660 if (!(val64 & s2BIT(15)))
3661 break;
3662 mdelay(1);
3663 cnt++;
3664 } while (cnt < 5);
3665 if (cnt == 5) {
3666 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3667 ret = 1;
3668 }
3669
3670 return ret;
3671}
3672
3673static void restore_xmsi_data(struct s2io_nic *nic)
3674{
3675 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3676 u64 val64;
3677 int i, msix_index;
3678
3679 if (nic->device_type == XFRAME_I_DEVICE)
3680 return;
3681
3682 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
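 /*
  * Vector 0 is the alarm vector; ring vectors occupy every eighth
  * slot of the XMSI table starting at index 1.
  */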
3683 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3684 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3685 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3686 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3687 writeq(val64, &bar0->xmsi_access);
3688 if (wait_for_msix_trans(nic, msix_index)) {
3689 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3690 __func__, msix_index);
3691 continue;
3692 }
3693 }
3694}
3695
3696static void store_xmsi_data(struct s2io_nic *nic)
3697{
3698 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3699 u64 val64, addr, data;
3700 int i, msix_index;
3701
3702 if (nic->device_type == XFRAME_I_DEVICE)
3703 return;
3704
 /* Save each vector's address/data pair from the XMSI table */
3706 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3707 msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3708 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3709 writeq(val64, &bar0->xmsi_access);
3710 if (wait_for_msix_trans(nic, msix_index)) {
3711 DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3712 __func__, msix_index);
3713 continue;
3714 }
3715 addr = readq(&bar0->xmsi_address);
3716 data = readq(&bar0->xmsi_data);
3717 if (addr && data) {
3718 nic->msix_info[i].addr = addr;
3719 nic->msix_info[i].data = data;
3720 }
3721 }
3722}
3723
3724static int s2io_enable_msi_x(struct s2io_nic *nic)
3725{
3726 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3727 u64 rx_mat;
3728 u16 msi_control;
3729 int ret, i, j, msix_indx = 1;
3730 int size;
3731 struct stat_block *stats = nic->mac_control.stats_info;
3732 struct swStat *swstats = &stats->sw_stat;
3733
3734 size = nic->num_entries * sizeof(struct msix_entry);
3735 nic->entries = kzalloc(size, GFP_KERNEL);
3736 if (!nic->entries) {
3737 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3738 __func__);
3739 swstats->mem_alloc_fail_cnt++;
3740 return -ENOMEM;
3741 }
3742 swstats->mem_allocated += size;
3743
3744 size = nic->num_entries * sizeof(struct s2io_msix_entry);
3745 nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3746 if (!nic->s2io_entries) {
3747 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3748 __func__);
3749 swstats->mem_alloc_fail_cnt++;
3750 kfree(nic->entries);
3751 swstats->mem_freed
3752 += (nic->num_entries * sizeof(struct msix_entry));
3753 return -ENOMEM;
3754 }
3755 swstats->mem_allocated += size;
3756
3757 nic->entries[0].entry = 0;
3758 nic->s2io_entries[0].entry = 0;
3759 nic->s2io_entries[0].in_use = MSIX_FLG;
3760 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3761 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3762
3763 for (i = 1; i < nic->num_entries; i++) {
3764 nic->entries[i].entry = ((i - 1) * 8) + 1;
3765 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3766 nic->s2io_entries[i].arg = NULL;
3767 nic->s2io_entries[i].in_use = 0;
3768 }
3769
3770 rx_mat = readq(&bar0->rx_mat);
3771 for (j = 0; j < nic->config.rx_ring_num; j++) {
3772 rx_mat |= RX_MAT_SET(j, msix_indx);
3773 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3774 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3775 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3776 msix_indx += 8;
3777 }
3778 writeq(rx_mat, &bar0->rx_mat);
3779 readq(&bar0->rx_mat);
3780
3781 ret = pci_enable_msix_range(nic->pdev, nic->entries,
3782 nic->num_entries, nic->num_entries);
3783
3784 if (ret < 0) {
3785 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3786 kfree(nic->entries);
3787 swstats->mem_freed += nic->num_entries *
3788 sizeof(struct msix_entry);
3789 kfree(nic->s2io_entries);
3790 swstats->mem_freed += nic->num_entries *
3791 sizeof(struct s2io_msix_entry);
3792 nic->entries = NULL;
3793 nic->s2io_entries = NULL;
3794 return -ENOMEM;
3795 }
3796
 /*
  * To enable MSI-X, MSI also needs to be enabled, due to a bug
  * in the herc NIC.
  */
3801 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3802 msi_control |= 0x1;
3803 pci_write_config_word(nic->pdev, 0x42, msi_control);
3804
3805 return 0;
3806}
3807
 /* Handle the software interrupt raised during the MSI(X) test */
3809static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3810{
3811 struct s2io_nic *sp = dev_id;
3812
3813 sp->msi_detected = 1;
3814 wake_up(&sp->msi_wait);
3815
3816 return IRQ_HANDLED;
3817}
3818
 /* Test the interrupt path by forcing a single software interrupt */
3820static int s2io_test_msi(struct s2io_nic *sp)
3821{
3822 struct pci_dev *pdev = sp->pdev;
3823 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3824 int err;
3825 u64 val64, saved64;
3826
3827 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3828 sp->name, sp);
3829 if (err) {
3830 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3831 sp->dev->name, pci_name(pdev), pdev->irq);
3832 return err;
3833 }
3834
3835 init_waitqueue_head(&sp->msi_wait);
3836 sp->msi_detected = 0;
3837
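 /*
  * Program the scheduler timer to fire a single one-shot interrupt,
  * steered to MSI-X vector 1.
  */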
3838 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3839 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3840 val64 |= SCHED_INT_CTRL_TIMER_EN;
3841 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3842 writeq(val64, &bar0->scheduled_int_ctrl);
3843
3844 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3845
3846 if (!sp->msi_detected) {
3847
3848 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3849 "using MSI(X) during test\n",
3850 sp->dev->name, pci_name(pdev));
3851
3852 err = -EOPNOTSUPP;
3853 }
3854
3855 free_irq(sp->entries[1].vector, sp);
3856
3857 writeq(saved64, &bar0->scheduled_int_ctrl);
3858
3859 return err;
3860}
3861
3862static void remove_msix_isr(struct s2io_nic *sp)
3863{
3864 int i;
3865 u16 msi_control;
3866
3867 for (i = 0; i < sp->num_entries; i++) {
3868 if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3869 int vector = sp->entries[i].vector;
3870 void *arg = sp->s2io_entries[i].arg;
3871 free_irq(vector, arg);
3872 }
3873 }
3874
3875 kfree(sp->entries);
3876 kfree(sp->s2io_entries);
3877 sp->entries = NULL;
3878 sp->s2io_entries = NULL;
3879
3880 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3881 msi_control &= 0xFFFE;
3882 pci_write_config_word(sp->pdev, 0x42, msi_control);
3883
3884 pci_disable_msix(sp->pdev);
3885}
3886
3887static void remove_inta_isr(struct s2io_nic *sp)
3888{
3889 free_irq(sp->pdev->irq, sp->dev);
3890}
3891
/**
 * s2io_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver. It mainly calls
 * a function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
3908static int s2io_open(struct net_device *dev)
3909{
3910 struct s2io_nic *sp = netdev_priv(dev);
3911 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3912 int err = 0;
3913
 /*
  * Make sure you have link off by default every time
  * the NIC is initialized.
  */
3918 netif_carrier_off(dev);
3919 sp->last_link_state = 0;
3920
 /* Initialize H/W and enable interrupts */
3922 err = s2io_card_up(sp);
3923 if (err) {
3924 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3925 dev->name);
3926 goto hw_init_failed;
3927 }
3928
3929 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3930 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3931 s2io_card_down(sp);
3932 err = -ENODEV;
3933 goto hw_init_failed;
3934 }
3935 s2io_start_all_tx_queue(sp);
3936 return 0;
3937
3938hw_init_failed:
3939 if (sp->config.intr_type == MSI_X) {
3940 if (sp->entries) {
3941 kfree(sp->entries);
3942 swstats->mem_freed += sp->num_entries *
3943 sizeof(struct msix_entry);
3944 }
3945 if (sp->s2io_entries) {
3946 kfree(sp->s2io_entries);
3947 swstats->mem_freed += sp->num_entries *
3948 sizeof(struct s2io_msix_entry);
3949 }
3950 }
3951 return err;
3952}
3953
/**
 * s2io_close - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred
 * to as the close function. Among other things this function mainly
 * stops the Rx side of the NIC and frees all the Rx buffers in the Rx
 * rings.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
3967static int s2io_close(struct net_device *dev)
3968{
3969 struct s2io_nic *sp = netdev_priv(dev);
3970 struct config_param *config = &sp->config;
3971 u64 tmp64;
3972 int offset;
3973
 /* Return if the device is already closed; this can happen when
  * s2io_card_up failed in change_mtu.
  */
3977 if (!is_s2io_card_up(sp))
3978 return 0;
3979
3980 s2io_stop_all_tx_queue(sp);
 /* delete all populated mac entries */
3982 for (offset = 1; offset < config->max_mc_addr; offset++) {
3983 tmp64 = do_s2io_read_unicast_mc(sp, offset);
3984 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3985 do_s2io_delete_unicast_mc(sp, tmp64);
3986 }
3987
3988 s2io_card_down(sp);
3989
3990 return 0;
3991}
3992
/**
 * s2io_xmit - Tx entry point of the driver
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 * Description:
 * This function is the Tx entry point of the driver. The S2IO NIC
 * supports certain protocol assist features on the Tx side, namely CSO
 * (checksum offload), S/G (scatter/gather) and LSO (large send offload).
 * NOTE: when the device cannot queue the pkt, just the trans_start
 * variable will not be updated.
 */
4006static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4007{
4008 struct s2io_nic *sp = netdev_priv(dev);
4009 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4010 register u64 val64;
4011 struct TxD *txdp;
4012 struct TxFIFO_element __iomem *tx_fifo;
4013 unsigned long flags = 0;
4014 u16 vlan_tag = 0;
4015 struct fifo_info *fifo = NULL;
4016 int offload_type;
4017 int enable_per_list_interrupt = 0;
4018 struct config_param *config = &sp->config;
4019 struct mac_info *mac_control = &sp->mac_control;
4020 struct stat_block *stats = mac_control->stats_info;
4021 struct swStat *swstats = &stats->sw_stat;
4022
4023 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4024
4025 if (unlikely(skb->len <= 0)) {
4026 DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4027 dev_kfree_skb_any(skb);
4028 return NETDEV_TX_OK;
4029 }
4030
4031 if (!is_s2io_card_up(sp)) {
4032 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4033 dev->name);
4034 dev_kfree_skb_any(skb);
4035 return NETDEV_TX_OK;
4036 }
4037
4038 queue = 0;
4039 if (skb_vlan_tag_present(skb))
4040 vlan_tag = skb_vlan_tag_get(skb);
4041 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4042 if (skb->protocol == htons(ETH_P_IP)) {
4043 struct iphdr *ip;
4044 struct tcphdr *th;
4045 ip = ip_hdr(skb);
4046
4047 if (!ip_is_fragment(ip)) {
4048 th = (struct tcphdr *)(((unsigned char *)ip) +
4049 ip->ihl*4);
4050
4051 if (ip->protocol == IPPROTO_TCP) {
4052 queue_len = sp->total_tcp_fifos;
4053 queue = (ntohs(th->source) +
4054 ntohs(th->dest)) &
4055 sp->fifo_selector[queue_len - 1];
4056 if (queue >= queue_len)
4057 queue = queue_len - 1;
4058 } else if (ip->protocol == IPPROTO_UDP) {
4059 queue_len = sp->total_udp_fifos;
4060 queue = (ntohs(th->source) +
4061 ntohs(th->dest)) &
4062 sp->fifo_selector[queue_len - 1];
4063 if (queue >= queue_len)
4064 queue = queue_len - 1;
4065 queue += sp->udp_fifo_idx;
4066 if (skb->len > 1024)
4067 enable_per_list_interrupt = 1;
4068 }
4069 }
4070 }
4071 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
 /* get fifo number based on skb->priority value */
4073 queue = config->fifo_mapping
4074 [skb->priority & (MAX_TX_FIFOS - 1)];
4075 fifo = &mac_control->fifos[queue];
4076
4077 spin_lock_irqsave(&fifo->tx_lock, flags);
4078
4079 if (sp->config.multiq) {
4080 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4081 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4082 return NETDEV_TX_BUSY;
4083 }
4084 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4085 if (netif_queue_stopped(dev)) {
4086 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4087 return NETDEV_TX_BUSY;
4088 }
4089 }
4090
4091 put_off = (u16)fifo->tx_curr_put_info.offset;
4092 get_off = (u16)fifo->tx_curr_get_info.offset;
4093 txdp = fifo->list_info[put_off].list_virt_addr;
4094
4095 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4096
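 /*
  * A TxD whose Host_Control is still set, or a put pointer about to
  * wrap onto the get pointer, means there are no free descriptors.
  */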
4097 if (txdp->Host_Control ||
4098 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4099 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4100 s2io_stop_tx_queue(sp, fifo->fifo_no);
4101 dev_kfree_skb_any(skb);
4102 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4103 return NETDEV_TX_OK;
4104 }
4105
4106 offload_type = s2io_offload_type(skb);
4107 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4108 txdp->Control_1 |= TXD_TCP_LSO_EN;
4109 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4110 }
4111 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4112 txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4113 TXD_TX_CKO_TCP_EN |
4114 TXD_TX_CKO_UDP_EN);
4115 }
4116 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4117 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4118 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
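 /*
  * For large UDP flows, optionally request a per-list completion
  * interrupt based on a put-offset heuristic to pace completions.
  */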
4119 if (enable_per_list_interrupt)
4120 if (put_off & (queue_len >> 5))
4121 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4122 if (vlan_tag) {
4123 txdp->Control_2 |= TXD_VLAN_ENABLE;
4124 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4125 }
4126
4127 frg_len = skb_headlen(skb);
4128 txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4129 frg_len, PCI_DMA_TODEVICE);
4130 if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4131 goto pci_map_failed;
4132
4133 txdp->Host_Control = (unsigned long)skb;
4134 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4135
4136 frg_cnt = skb_shinfo(skb)->nr_frags;
4137
4138 for (i = 0; i < frg_cnt; i++) {
4139 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4140
4141 if (!skb_frag_size(frag))
4142 continue;
4143 txdp++;
4144 txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4145 frag, 0,
4146 skb_frag_size(frag),
4147 DMA_TO_DEVICE);
4148 txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4149 }
4150 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4151
4152 tx_fifo = mac_control->tx_FIFO_start[queue];
4153 val64 = fifo->list_info[put_off].list_phy_addr;
4154 writeq(val64, &tx_fifo->TxDL_Pointer);
4155
4156 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4157 TX_FIFO_LAST_LIST);
4158 if (offload_type)
4159 val64 |= TX_FIFO_SPECIAL_FUNC;
4160
4161 writeq(val64, &tx_fifo->List_Control);
4162
4163 mmiowb();
4164
4165 put_off++;
4166 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4167 put_off = 0;
4168 fifo->tx_curr_put_info.offset = put_off;
4169
 /* Avoid "put" pointer going beyond "get" pointer */
4171 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4172 swstats->fifo_full_cnt++;
4173 DBG_PRINT(TX_DBG,
4174 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4175 put_off, get_off);
4176 s2io_stop_tx_queue(sp, fifo->fifo_no);
4177 }
4178 swstats->mem_allocated += skb->truesize;
4179 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4180
4181 if (sp->config.intr_type == MSI_X)
4182 tx_intr_handler(fifo);
4183
4184 return NETDEV_TX_OK;
4185
4186pci_map_failed:
4187 swstats->pci_map_fail_cnt++;
4188 s2io_stop_tx_queue(sp, fifo->fifo_no);
4189 swstats->mem_freed += skb->truesize;
4190 dev_kfree_skb_any(skb);
4191 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4192 return NETDEV_TX_OK;
4193}
4194
4195static void
4196s2io_alarm_handle(unsigned long data)
4197{
4198 struct s2io_nic *sp = (struct s2io_nic *)data;
4199 struct net_device *dev = sp->dev;
4200
4201 s2io_handle_errors(dev);
4202 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4203}
4204
4205static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4206{
4207 struct ring_info *ring = (struct ring_info *)dev_id;
4208 struct s2io_nic *sp = ring->nic;
4209 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4210
4211 if (unlikely(!is_s2io_card_up(sp)))
4212 return IRQ_HANDLED;
4213
4214 if (sp->config.napi) {
4215 u8 __iomem *addr = NULL;
4216 u8 val8 = 0;
4217
4218 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4219 addr += (7 - ring->ring_no);
4220 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4221 writeb(val8, addr);
4222 val8 = readb(addr);
4223 napi_schedule(&ring->napi);
4224 } else {
4225 rx_intr_handler(ring, 0);
4226 s2io_chk_rx_buffers(sp, ring);
4227 }
4228
4229 return IRQ_HANDLED;
4230}
4231
4232static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4233{
4234 int i;
4235 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4236 struct s2io_nic *sp = fifos->nic;
4237 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4238 struct config_param *config = &sp->config;
4239 u64 reason;
4240
4241 if (unlikely(!is_s2io_card_up(sp)))
4242 return IRQ_NONE;
4243
4244 reason = readq(&bar0->general_int_status);
4245 if (unlikely(reason == S2IO_MINUS_ONE))
 /* Nothing much can be done; the read itself failed. Get out. */
4247 return IRQ_HANDLED;
4248
4249 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4250 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4251
4252 if (reason & GEN_INTR_TXPIC)
4253 s2io_txpic_intr_handle(sp);
4254
4255 if (reason & GEN_INTR_TXTRAFFIC)
4256 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4257
4258 for (i = 0; i < config->tx_fifo_num; i++)
4259 tx_intr_handler(&fifos[i]);
4260
4261 writeq(sp->general_int_mask, &bar0->general_int_mask);
4262 readl(&bar0->general_int_status);
4263 return IRQ_HANDLED;
4264 }
4265
4266 return IRQ_NONE;
4267}
4268
4269static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4270{
4271 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4272 u64 val64;
4273
4274 val64 = readq(&bar0->pic_int_status);
4275 if (val64 & PIC_INT_GPIO) {
4276 val64 = readq(&bar0->gpio_int_reg);
4277 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4278 (val64 & GPIO_INT_REG_LINK_UP)) {
 /*
  * This is an unstable state, so clear both the up and down
  * interrupt bits and let the adapter re-evaluate the link state.
  */
4283 val64 |= GPIO_INT_REG_LINK_DOWN;
4284 val64 |= GPIO_INT_REG_LINK_UP;
4285 writeq(val64, &bar0->gpio_int_reg);
4286 val64 = readq(&bar0->gpio_int_mask);
4287 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4288 GPIO_INT_MASK_LINK_DOWN);
4289 writeq(val64, &bar0->gpio_int_mask);
4290 } else if (val64 & GPIO_INT_REG_LINK_UP) {
4291 val64 = readq(&bar0->adapter_status);
 /* Enable Adapter */
4293 val64 = readq(&bar0->adapter_control);
4294 val64 |= ADAPTER_CNTL_EN;
4295 writeq(val64, &bar0->adapter_control);
4296 val64 |= ADAPTER_LED_ON;
4297 writeq(val64, &bar0->adapter_control);
4298 if (!sp->device_enabled_once)
4299 sp->device_enabled_once = 1;
4300
4301 s2io_link(sp, LINK_UP);
4302
 /* Unmask the link-down interrupt and mask the link-up
  * interrupt.
  */
4306 val64 = readq(&bar0->gpio_int_mask);
4307 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4308 val64 |= GPIO_INT_MASK_LINK_UP;
4309 writeq(val64, &bar0->gpio_int_mask);
4310
4311 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4312 val64 = readq(&bar0->adapter_status);
4313 s2io_link(sp, LINK_DOWN);
4314
4315 val64 = readq(&bar0->gpio_int_mask);
4316 val64 &= ~GPIO_INT_MASK_LINK_UP;
4317 val64 |= GPIO_INT_MASK_LINK_DOWN;
4318 writeq(val64, &bar0->gpio_int_mask);
4319
 /* turn off the LED */
4321 val64 = readq(&bar0->adapter_control);
4322 val64 = val64 & (~ADAPTER_LED_ON);
4323 writeq(val64, &bar0->adapter_control);
4324 }
4325 }
4326 val64 = readq(&bar0->gpio_int_mask);
4327}
4328
/**
 * do_s2io_chk_alarm_bit - Check for an alarm and increment the counter
 * @value : alarm bits
 * @addr  : address value
 * @cnt   : counter variable
 * Description: Check for an alarm, clear it and increment the counter.
 * Return Value:
 * 1 - if an alarm bit was set
 * 0 - if no alarm bit was set
 */
4339static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4340 unsigned long long *cnt)
4341{
4342 u64 val64;
4343 val64 = readq(addr);
4344 if (val64 & value) {
4345 writeq(val64, addr);
4346 (*cnt)++;
4347 return 1;
4348 }
4349 return 0;
4350
4351}
4352
/**
 * s2io_handle_errors - Xframe error indication handler
 * @dev_id : opaque pointer to the net_device structure.
 * Description: Handle alarms such as loss of link, single or
 * double ECC errors, and critical or serious errors.
 * Return Value:
 * NONE
 */
4361static void s2io_handle_errors(void *dev_id)
4362{
4363 struct net_device *dev = (struct net_device *)dev_id;
4364 struct s2io_nic *sp = netdev_priv(dev);
4365 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4366 u64 temp64 = 0, val64 = 0;
4367 int i = 0;
4368
4369 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4370 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4371
4372 if (!is_s2io_card_up(sp))
4373 return;
4374
4375 if (pci_channel_offline(sp->pdev))
4376 return;
4377
4378 memset(&sw_stat->ring_full_cnt, 0,
4379 sizeof(sw_stat->ring_full_cnt));
4380
 /* Handling the XPAK counters update */
4382 if (stats->xpak_timer_count < 72000) {
 /* not yet time to update the XPAK counters */
4384 stats->xpak_timer_count++;
4385 } else {
4386 s2io_updt_xpak_counter(dev);
 /* reset the count to zero */
4388 stats->xpak_timer_count = 0;
4389 }
4390
 /* Handling the link status change error interrupt */
4392 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4393 val64 = readq(&bar0->mac_rmac_err_reg);
4394 writeq(val64, &bar0->mac_rmac_err_reg);
4395 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4396 schedule_work(&sp->set_link_task);
4397 }
4398
 /* In case of a serious error, the device will be reset. */
4400 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4401 &sw_stat->serious_err_cnt))
4402 goto reset;
4403
 /* Check for data parity error */
4405 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4406 &sw_stat->parity_err_cnt))
4407 goto reset;
4408
 /* Check for ring full counters */
4410 if (sp->device_type == XFRAME_II_DEVICE) {
4411 val64 = readq(&bar0->ring_bump_counter1);
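 /* Each bump register packs four 16-bit per-ring counters. */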
4412 for (i = 0; i < 4; i++) {
4413 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4414 temp64 >>= 64 - ((i+1)*16);
4415 sw_stat->ring_full_cnt[i] += temp64;
4416 }
4417
4418 val64 = readq(&bar0->ring_bump_counter2);
4419 for (i = 0; i < 4; i++) {
4420 temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4421 temp64 >>= 64 - ((i+1)*16);
4422 sw_stat->ring_full_cnt[i+4] += temp64;
4423 }
4424 }
4425
4426 val64 = readq(&bar0->txdma_int_status);
 /* check for pfc_err */
4428 if (val64 & TXDMA_PFC_INT) {
4429 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4430 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4431 PFC_PCIX_ERR,
4432 &bar0->pfc_err_reg,
4433 &sw_stat->pfc_err_cnt))
4434 goto reset;
4435 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4436 &bar0->pfc_err_reg,
4437 &sw_stat->pfc_err_cnt);
4438 }
4439
 /* check for tda_err */
4441 if (val64 & TXDMA_TDA_INT) {
4442 if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4443 TDA_SM0_ERR_ALARM |
4444 TDA_SM1_ERR_ALARM,
4445 &bar0->tda_err_reg,
4446 &sw_stat->tda_err_cnt))
4447 goto reset;
4448 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4449 &bar0->tda_err_reg,
4450 &sw_stat->tda_err_cnt);
4451 }

 /* check for pcc_err */
4453 if (val64 & TXDMA_PCC_INT) {
4454 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4455 PCC_N_SERR | PCC_6_COF_OV_ERR |
4456 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4457 PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4458 PCC_TXB_ECC_DB_ERR,
4459 &bar0->pcc_err_reg,
4460 &sw_stat->pcc_err_cnt))
4461 goto reset;
4462 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4463 &bar0->pcc_err_reg,
4464 &sw_stat->pcc_err_cnt);
4465 }
4466
4467
4468 if (val64 & TXDMA_TTI_INT) {
4469 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4470 &bar0->tti_err_reg,
4471 &sw_stat->tti_err_cnt))
4472 goto reset;
4473 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4474 &bar0->tti_err_reg,
4475 &sw_stat->tti_err_cnt);
4476 }
4477
4478
4479 if (val64 & TXDMA_LSO_INT) {
4480 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4481 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4482 &bar0->lso_err_reg,
4483 &sw_stat->lso_err_cnt))
4484 goto reset;
4485 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4486 &bar0->lso_err_reg,
4487 &sw_stat->lso_err_cnt);
4488 }
4489
4490
4491 if (val64 & TXDMA_TPA_INT) {
4492 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4493 &bar0->tpa_err_reg,
4494 &sw_stat->tpa_err_cnt))
4495 goto reset;
4496 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4497 &bar0->tpa_err_reg,
4498 &sw_stat->tpa_err_cnt);
4499 }
4500
4501
4502 if (val64 & TXDMA_SM_INT) {
4503 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4504 &bar0->sm_err_reg,
4505 &sw_stat->sm_err_cnt))
4506 goto reset;
4507 }
4508
4509 val64 = readq(&bar0->mac_int_status);
4510 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4511 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4512 &bar0->mac_tmac_err_reg,
4513 &sw_stat->mac_tmac_err_cnt))
4514 goto reset;
4515 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4516 TMAC_DESC_ECC_SG_ERR |
4517 TMAC_DESC_ECC_DB_ERR,
4518 &bar0->mac_tmac_err_reg,
4519 &sw_stat->mac_tmac_err_cnt);
4520 }
4521
4522 val64 = readq(&bar0->xgxs_int_status);
4523 if (val64 & XGXS_INT_STATUS_TXGXS) {
4524 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4525 &bar0->xgxs_txgxs_err_reg,
4526 &sw_stat->xgxs_txgxs_err_cnt))
4527 goto reset;
4528 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4529 &bar0->xgxs_txgxs_err_reg,
4530 &sw_stat->xgxs_txgxs_err_cnt);
4531 }
4532
4533 val64 = readq(&bar0->rxdma_int_status);
4534 if (val64 & RXDMA_INT_RC_INT_M) {
4535 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4536 RC_FTC_ECC_DB_ERR |
4537 RC_PRCn_SM_ERR_ALARM |
4538 RC_FTC_SM_ERR_ALARM,
4539 &bar0->rc_err_reg,
4540 &sw_stat->rc_err_cnt))
4541 goto reset;
4542 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4543 RC_FTC_ECC_SG_ERR |
4544 RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4545 &sw_stat->rc_err_cnt);
4546 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4547 PRC_PCI_AB_WR_Rn |
4548 PRC_PCI_AB_F_WR_Rn,
4549 &bar0->prc_pcix_err_reg,
4550 &sw_stat->prc_pcix_err_cnt))
4551 goto reset;
4552 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4553 PRC_PCI_DP_WR_Rn |
4554 PRC_PCI_DP_F_WR_Rn,
4555 &bar0->prc_pcix_err_reg,
4556 &sw_stat->prc_pcix_err_cnt);
4557 }
4558
4559 if (val64 & RXDMA_INT_RPA_INT_M) {
4560 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4561 &bar0->rpa_err_reg,
4562 &sw_stat->rpa_err_cnt))
4563 goto reset;
4564 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4565 &bar0->rpa_err_reg,
4566 &sw_stat->rpa_err_cnt);
4567 }
4568
4569 if (val64 & RXDMA_INT_RDA_INT_M) {
4570 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4571 RDA_FRM_ECC_DB_N_AERR |
4572 RDA_SM1_ERR_ALARM |
4573 RDA_SM0_ERR_ALARM |
4574 RDA_RXD_ECC_DB_SERR,
4575 &bar0->rda_err_reg,
4576 &sw_stat->rda_err_cnt))
4577 goto reset;
4578 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4579 RDA_FRM_ECC_SG_ERR |
4580 RDA_MISC_ERR |
4581 RDA_PCIX_ERR,
4582 &bar0->rda_err_reg,
4583 &sw_stat->rda_err_cnt);
4584 }
4585
4586 if (val64 & RXDMA_INT_RTI_INT_M) {
4587 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4588 &bar0->rti_err_reg,
4589 &sw_stat->rti_err_cnt))
4590 goto reset;
4591 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4592 &bar0->rti_err_reg,
4593 &sw_stat->rti_err_cnt);
4594 }
4595
4596 val64 = readq(&bar0->mac_int_status);
4597 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4598 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4599 &bar0->mac_rmac_err_reg,
4600 &sw_stat->mac_rmac_err_cnt))
4601 goto reset;
4602 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4603 RMAC_SINGLE_ECC_ERR |
4604 RMAC_DOUBLE_ECC_ERR,
4605 &bar0->mac_rmac_err_reg,
4606 &sw_stat->mac_rmac_err_cnt);
4607 }
4608
4609 val64 = readq(&bar0->xgxs_int_status);
4610 if (val64 & XGXS_INT_STATUS_RXGXS) {
4611 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4612 &bar0->xgxs_rxgxs_err_reg,
4613 &sw_stat->xgxs_rxgxs_err_cnt))
4614 goto reset;
4615 }
4616
4617 val64 = readq(&bar0->mc_int_status);
4618 if (val64 & MC_INT_STATUS_MC_INT) {
4619 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4620 &bar0->mc_err_reg,
4621 &sw_stat->mc_err_cnt))
4622 goto reset;
4623
4624
4625 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4626 writeq(val64, &bar0->mc_err_reg);
4627 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4628 sw_stat->double_ecc_errs++;
4629 if (sp->device_type != XFRAME_II_DEVICE) {
4630
4631
4632
4633 if (val64 &
4634 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4635 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4636 goto reset;
4637 }
4638 } else
4639 sw_stat->single_ecc_errs++;
4640 }
4641 }
4642 return;
4643
4644reset:
4645 s2io_stop_all_tx_queue(sp);
4646 schedule_work(&sp->rst_timer_task);
4647 sw_stat->soft_reset_cnt++;
4648}
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
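/**
 * s2io_isr - ISR handler of the device.
 * @irq: the irq of the device.
 * @dev_id: a void pointer to the dev structure of the NIC.
 *
 * Identifies the reason for the interrupt and calls the relevant
 * service routines (Rx, Tx completion, PIC).
 *
 * Return: IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise.
 */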
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	config = &sp->config;
	mac_control = &sp->mac_control;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be:
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	if (unlikely(reason == S2IO_MINUS_ONE))
		return IRQ_HANDLED;	/* Nothing much can be done, get out */

	if (reason &
	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				napi_schedule(&sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all
			 * 1's will ensure that the actual interrupt causing
			 * bit gets cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				rx_intr_handler(ring, 0);
			}
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit gets
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				s2io_chk_rx_buffers(sp, ring);
			}
		}
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	} else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

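/* s2io_updt_stats - triggers a one-shot statistics update on the card */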
static void s2io_updt_stats(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt = 0;

	if (is_s2io_card_up(sp)) {
		/* trigger a one-shot statistics update */
		val64 = SET_UPDT_CLICKS(10) |
			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
		writeq(val64, &bar0->stat_cfg);
		do {
			udelay(100);
			val64 = readq(&bar0->stat_cfg);
			if (!(val64 & s2BIT(0)))
				break;
			cnt++;
			if (cnt == 5)
				break; /* update failed */
		} while (1);
	}
}

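/**
 * s2io_get_stats - updates the device statistics structure.
 * @dev: pointer to the device structure.
 *
 * Updates the device statistics structure in the s2io_nic structure and
 * returns a pointer to the net_device statistics.
 */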
static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	u64 delta;

	/* Configure stats for immediate update */
	s2io_updt_stats(sp);

	/* A device reset will cause the on-adapter statistics to be zero'ed.
	 * This can be done while running by changing the MTU. To prevent the
	 * system from having the stats zero'ed, the driver keeps a copy of
	 * the last update to the system (which is also zero'ed on reset).
	 * This enables stat comparison after running e.g. ethtool.
	 */
	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		 le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
	sp->stats.rx_packets += delta;
	dev->stats.rx_packets += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
		 le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
	sp->stats.tx_packets += delta;
	dev->stats.tx_packets += delta;

	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		 le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
	sp->stats.rx_bytes += delta;
	dev->stats.rx_bytes += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		 le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
	sp->stats.tx_bytes += delta;
	dev->stats.tx_bytes += delta;

	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
	sp->stats.rx_errors += delta;
	dev->stats.rx_errors += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		 le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
	sp->stats.tx_errors += delta;
	dev->stats.tx_errors += delta;

	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
	sp->stats.rx_dropped += delta;
	dev->stats.rx_dropped += delta;

	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
	sp->stats.tx_dropped += delta;
	dev->stats.tx_dropped += delta;

	/* The adapter counts pause control frames in rmac_vld_mcst_frms but
	 * does not pass them up to the stack, so subtract them from the
	 * multicast count.
	 */
	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
	delta -= sp->stats.multicast;
	sp->stats.multicast += delta;
	dev->stats.multicast += delta;

	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		 le32_to_cpu(stats->rmac_usized_frms)) +
		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
	sp->stats.rx_length_errors += delta;
	dev->stats.rx_length_errors += delta;

	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
	sp->stats.rx_crc_errors += delta;
	dev->stats.rx_crc_errors += delta;

	return &dev->stats;
}

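/**
 * s2io_set_multicast - entry point for multicast address enable/disable.
 * @dev: pointer to the device structure
 *
 * Called by the kernel whenever multicast addresses must be enabled or
 * disabled. Also used to set/reset promiscuous mode, depending on the
 * device flags.
 */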
static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct netdev_hw_addr *ha;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
		0xfeffffffffffULL;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
	void __iomem *add;
	struct config_param *config = &sp->config;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/* Enable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 1;
		sp->all_multi_pos = config->max_mc_addr - 1;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/* Disable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/* Put the NIC into promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/* Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
	}

	/* Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
		if (netdev_mc_count(dev) >
		    (config->max_mc_addr - config->max_mac_addr)) {
			DBG_PRINT(ERR_DBG,
				  "%s: No more Rx filters can be added - "
				  "please enable ALL_MULTI instead\n",
				  dev->name);
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = netdev_mc_count(dev);

		/* Clear the old list */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(config->mc_start_offset + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completion */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
		}

		/* Create the new Rx filter list and update it in hardware */
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			mac_addr = 0;
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= ha->addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(i + config->mc_start_offset);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completion */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
			i++;
		}
	}
}

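/* Read the unicast and multicast addresses from the CAM and cache them
 * in the def_mac_addr array, so they can be restored after a card reset.
 */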
static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
{
	int offset;
	u64 mac_addr = 0x0;
	struct config_param *config = &sp->config;

	/* store unicast & multicast mac addresses */
	for (offset = 0; offset < config->max_mc_addr; offset++) {
		mac_addr = do_s2io_read_unicast_mc(sp, offset);
		/* if the read fails, disable the entry */
		if (mac_addr == FAILURE)
			mac_addr = S2IO_DISABLE_MAC_ENTRY;
		do_s2io_copy_mac_addr(sp, offset, mac_addr);
	}
}

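/* Program the cached unicast and multicast MAC entries back into the CAM */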
static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
{
	int offset;
	struct config_param *config = &sp->config;

	/* restore unicast mac addresses */
	for (offset = 0; offset < config->max_mac_addr; offset++)
		do_s2io_prog_unicast(sp->dev,
				     sp->def_mac_addr[offset].mac_addr);

	/* restore multicast mac addresses */
	for (offset = config->mc_start_offset;
	     offset < config->max_mc_addr; offset++)
		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
}

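/* Add a multicast MAC address to the first free CAM entry */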
static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
{
	int i;
	u64 mac_addr = 0;
	struct config_param *config = &sp->config;

	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
	}
	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
		return SUCCESS;

	/* check if the multicast mac is already present in the CAM */
	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
		u64 tmp64;

		tmp64 = do_s2io_read_unicast_mc(sp, i);
		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
			break;

		if (tmp64 == mac_addr)
			return SUCCESS;
	}
	if (i == config->max_mc_addr) {
		DBG_PRINT(ERR_DBG,
			  "CAM full no space left for multicast MAC\n");
		return FAILURE;
	}
	/* Update the internal structure with this new mac address */
	do_s2io_copy_mac_addr(sp, i, mac_addr);

	return do_s2io_add_mac(sp, mac_addr, i);
}

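/* Program a MAC address into the CAM entry at offset @off */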
static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
{
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
	       &bar0->rmac_addr_data0_mem);

	val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(off);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
		return FAILURE;
	}
	return SUCCESS;
}

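/* Delete the given MAC address from the CAM by disabling its entry */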
static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
{
	int offset;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
	struct config_param *config = &sp->config;

	for (offset = 1;
	     offset < config->max_mc_addr; offset++) {
		tmp64 = do_s2io_read_unicast_mc(sp, offset);
		if (tmp64 == addr) {
			/* disable the entry by writing the disable pattern */
			if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
				return FAILURE;
			/* re-cache the mac list from the CAM */
			do_s2io_store_unicast_mc(sp);
			return SUCCESS;
		}
	}
	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
		  (unsigned long long)addr);
	return FAILURE;
}

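/* Read the MAC address stored in the CAM entry at @offset */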
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	u64 tmp64 = 0xffffffffffff0000ULL, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read the mac addr */
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		return FAILURE;
	}
	tmp64 = readq(&bar0->rmac_addr_data0_mem);

	return tmp64 >> 16;
}

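/**
 * s2io_set_mac_addr - driver entry point to set the MAC address
 * @dev: pointer to the device structure
 * @p: a sockaddr carrying the new MAC address
 */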
static int s2io_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* store the MAC address in CAM */
	return do_s2io_prog_unicast(dev, dev->dev_addr);
}

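/**
 * do_s2io_prog_unicast - programs the Xframe mac address
 * @dev: pointer to the device structure.
 * @addr: a uchar pointer to the new mac address to be set.
 *
 * Programs the CAM with the new unicast MAC address.
 *
 * Return: SUCCESS on success, FAILURE if no CAM entry is available.
 */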
static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
{
	struct s2io_nic *sp = netdev_priv(dev);
	register u64 mac_addr = 0, perm_addr = 0;
	int i;
	u64 tmp64;
	struct config_param *config = &sp->config;

	/*
	 * Set the new MAC address as the new unicast filter and reflect this
	 * change on the device address registered with the OS. It will be
	 * at offset 0.
	 */
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
		perm_addr <<= 8;
		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
	}

	/* check if the dev_addr differs from perm_addr */
	if (mac_addr == perm_addr)
		return SUCCESS;

	/* check if the mac is already present in the CAM */
	for (i = 1; i < config->max_mac_addr; i++) {
		tmp64 = do_s2io_read_unicast_mc(sp, i);
		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
			break;

		if (tmp64 == mac_addr) {
			DBG_PRINT(INFO_DBG,
				  "MAC addr:0x%llx already present in CAM\n",
				  (unsigned long long)mac_addr);
			return SUCCESS;
		}
	}
	if (i == config->max_mac_addr) {
		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
		return FAILURE;
	}
	/* Update the internal structure with this new mac address */
	do_s2io_copy_mac_addr(sp, i, mac_addr);

	return do_s2io_add_mac(sp, mac_addr, i);
}

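/**
 * s2io_ethtool_set_link_ksettings - sets different link parameters.
 * @dev: pointer to netdev
 * @cmd: link settings requested by ethtool
 *
 * The NIC supports only 10 Gbps full duplex without autonegotiation,
 * so any other setting is rejected with -EINVAL.
 */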
static int
s2io_ethtool_set_link_ksettings(struct net_device *dev,
				const struct ethtool_link_ksettings *cmd)
{
	struct s2io_nic *sp = netdev_priv(dev);

	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
	    (cmd->base.speed != SPEED_10000) ||
	    (cmd->base.duplex != DUPLEX_FULL))
		return -EINVAL;

	s2io_close(sp->dev);
	s2io_open(sp->dev);

	return 0;
}

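/**
 * s2io_ethtool_get_link_ksettings - returns link specific information.
 * @dev: pointer to netdev
 * @cmd: structure filled in with speed, duplex and port information
 */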
static int
s2io_ethtool_get_link_ksettings(struct net_device *dev,
				struct ethtool_link_ksettings *cmd)
{
	struct s2io_nic *sp = netdev_priv(dev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);

	cmd->base.port = PORT_FIBRE;

	if (netif_carrier_ok(sp->dev)) {
		cmd->base.speed = SPEED_10000;
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	cmd->base.autoneg = AUTONEG_DISABLE;
	return 0;
}

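/**
 * s2io_ethtool_gdrvinfo - returns driver specific information.
 * @dev: pointer to netdev
 * @info: structure filled in with driver name, version and bus info
 */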
static void s2io_ethtool_gdrvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *info)
{
	struct s2io_nic *sp = netdev_priv(dev);

	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
}

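/**
 * s2io_ethtool_gregs - dumps the register space of the Xframe into a buffer.
 * @dev: pointer to netdev
 * @regs: dump parameters given by ethtool
 * @space: buffer into which the registers are copied
 */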
static void s2io_ethtool_gregs(struct net_device *dev,
			       struct ethtool_regs *regs, void *space)
{
	int i;
	u64 reg;
	u8 *reg_space = (u8 *)space;
	struct s2io_nic *sp = netdev_priv(dev);

	regs->len = XENA_REG_SPACE;
	regs->version = sp->pdev->subsystem_device;

	for (i = 0; i < regs->len; i += 8) {
		reg = readq(sp->bar0 + i);
		memcpy((reg_space + i), &reg, 8);
	}
}

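/*
 * s2io_set_led - turns the adapter's identification LED on or off, via
 * the GPIO or the adapter-control register depending on the card type.
 */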
static void s2io_set_led(struct s2io_nic *sp, bool on)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid = sp->pdev->subsystem_device;
	u64 val64;

	if ((sp->device_type == XFRAME_II_DEVICE) ||
	    ((subid & 0xFF) >= 0x07)) {
		val64 = readq(&bar0->gpio_control);
		if (on)
			val64 |= GPIO_CTRL_GPIO_0;
		else
			val64 &= ~GPIO_CTRL_GPIO_0;

		writeq(val64, &bar0->gpio_control);
	} else {
		val64 = readq(&bar0->adapter_control);
		if (on)
			val64 |= ADAPTER_LED_ON;
		else
			val64 &= ~ADAPTER_LED_ON;

		writeq(val64, &bar0->adapter_control);
	}
}

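/**
 * s2io_ethtool_set_led - physically identify the NIC on the system.
 * @dev: pointer to netdev
 * @state: requested LED state
 *
 * Blinks the Link LED so the adapter can be located in the system. On
 * early Xframe I cards the adapter must be enabled for the LED to blink.
 */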
static int s2io_ethtool_set_led(struct net_device *dev,
				enum ethtool_phys_id_state state)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid = sp->pdev->subsystem_device;

	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
		u64 val64 = readq(&bar0->adapter_control);

		if (!(val64 & ADAPTER_CNTL_EN)) {
			pr_err("Adapter Link down, cannot blink LED\n");
			return -EAGAIN;
		}
	}

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		s2io_set_led(sp, true);
		break;

	case ETHTOOL_ID_OFF:
		s2io_set_led(sp, false);
		break;

	case ETHTOOL_ID_INACTIVE:
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
	}

	return 0;
}

static void s2io_ethtool_gringparam(struct net_device *dev,
				    struct ethtool_ringparam *ering)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int i, tx_desc_count = 0, rx_desc_count = 0;

	if (sp->rxd_mode == RXD_MODE_1) {
		ering->rx_max_pending = MAX_RX_DESC_1;
		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
	} else {
		ering->rx_max_pending = MAX_RX_DESC_2;
		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
	}

	ering->tx_max_pending = MAX_TX_DESC;

	for (i = 0; i < sp->config.rx_ring_num; i++)
		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
	ering->rx_pending = rx_desc_count;
	ering->rx_jumbo_pending = rx_desc_count;

	for (i = 0; i < sp->config.tx_fifo_num; i++)
		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
	ering->tx_pending = tx_desc_count;
	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
}

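/**
 * s2io_ethtool_getpause_data - pause frame generation and reception.
 * @dev: pointer to netdev
 * @ep: structure filled in with the current pause parameters
 *
 * Reports the pause frame generation and reception capability of the NIC.
 */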
static void s2io_ethtool_getpause_data(struct net_device *dev,
				       struct ethtool_pauseparam *ep)
{
	u64 val64;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 & RMAC_PAUSE_GEN_ENABLE)
		ep->tx_pause = true;
	if (val64 & RMAC_PAUSE_RX_ENABLE)
		ep->rx_pause = true;
	ep->autoneg = false;
}

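/**
 * s2io_ethtool_setpause_data - set the pause parameters of the NIC.
 * @dev: pointer to netdev
 * @ep: pause parameters requested by ethtool
 *
 * Enables or disables pause frame generation and reception on the NIC.
 */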
static int s2io_ethtool_setpause_data(struct net_device *dev,
				      struct ethtool_pauseparam *ep)
{
	u64 val64;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	val64 = readq(&bar0->rmac_pause_cfg);
	if (ep->tx_pause)
		val64 |= RMAC_PAUSE_GEN_ENABLE;
	else
		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
	if (ep->rx_pause)
		val64 |= RMAC_PAUSE_RX_ENABLE;
	else
		val64 &= ~RMAC_PAUSE_RX_ENABLE;
	writeq(val64, &bar0->rmac_pause_cfg);
	return 0;
}

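/**
 * read_eeprom - reads 4 bytes of data from the user given offset.
 * @sp: pointer to the s2io_nic structure
 * @off: offset at which the data is to be read
 * @data: output parameter where the read data is stored
 *
 * Reads 4 bytes of data from the given EEPROM offset over the I2C bus
 * (Xframe I) or the SPI interface (Xframe II).
 *
 * Return: 0 on success, non-zero on failure.
 */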
#define S2IO_DEV_ID		5
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}

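/**
 * write_eeprom - writes the relevant part of the data value.
 * @sp: pointer to the s2io_nic structure
 * @off: offset at which the data must be written
 * @data: the data that is to be written
 * @cnt: number of bytes of data to be written into the EEPROM (max of 3)
 *
 * Writes the data over the I2C bus (Xframe I) or the SPI interface
 * (Xframe II).
 *
 * Return: 0 on success, non-zero on failure.
 */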
static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		int write_cnt = (cnt == 8) ? 0 : cnt;

		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
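
/*
 * s2io_vpd_read - reads the product name and serial number from the
 * adapter's VPD area via PCI config space accesses.
 */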
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev, (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter */
		for (cnt = 0; cnt < 252; cnt++) {
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* read product name */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}

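/**
 * s2io_ethtool_geeprom - reads the values stored in the EEPROM.
 * @dev: pointer to netdev
 * @eeprom: offset/length parameters given by ethtool
 * @data_buf: user buffer into which the EEPROM contents are copied
 *
 * Return: 0 on success, -EFAULT if a read fails.
 */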
static int s2io_ethtool_geeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data_buf)
{
	u32 i, valid;
	u64 data;
	struct s2io_nic *sp = netdev_priv(dev);

	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);

	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;

	for (i = 0; i < eeprom->len; i += 4) {
		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
			return -EFAULT;
		}
		valid = INV(data);
		memcpy((data_buf + i), &valid, 4);
	}
	return 0;
}

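/**
 * s2io_ethtool_seeprom - writes the user provided values into the EEPROM
 * @dev: pointer to netdev
 * @eeprom: offset/length/magic parameters given by ethtool
 * @data_buf: user defined values to be written into the EEPROM
 *
 * Return: 0 on success, -EFAULT on a bad magic value or a failed write.
 */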
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 *data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = netdev_priv(dev);

	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: "
			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
			  (sp->pdev->vendor | (sp->pdev->device << 16)),
			  eeprom->magic);
		return -EFAULT;
	}

	while (len) {
		data = (u32)data_buf[cnt] & 0x000000FF;
		if (data)
			valid = (u32)(data << 24);
		else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: "
				  "Cannot write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}

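/**
 * s2io_register_test - reads and writes into all clock domains.
 * @sp: pointer to the s2io_nic structure
 * @data: output variable that holds the test result
 *
 * Reads and writes registers in all the clock domains of the NIC to
 * verify that all register regions are accessible.
 *
 * Return: 0 on success, 1 on failure.
 */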
static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
	}

	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
	}

	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
	}

	*data = fail;
	return fail;
}

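/**
 * s2io_eeprom_test - verifies that the EEPROM in the xena can be programmed.
 * @sp: pointer to the s2io_nic structure
 * @data: output variable that holds the test result
 *
 * Return: 0 on success, 1 on failure.
 */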
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 0x4F0 */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data to 0xFFFFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7F0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data to 0xFFFFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}

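/**
 * s2io_bist_test - invokes the MemBist test of the card.
 * @sp: pointer to the s2io_nic structure
 * @data: output variable that holds the BIST completion code
 *
 * Gives the test around 2 seconds to complete; if it is still not done
 * within that period, the test is considered to have failed.
 *
 * Return: 0 on success and -1 on failure.
 */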
static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
{
	u8 bist = 0;
	int cnt = 0, ret = -1;

	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
	bist |= PCI_BIST_START;
	/* PCI_BIST is a single byte register */
	pci_write_config_byte(sp->pdev, PCI_BIST, bist);

	while (cnt < 20) {
		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
		if (!(bist & PCI_BIST_START)) {
			*data = (bist & PCI_BIST_CODE_MASK);
			ret = 0;
			break;
		}
		msleep(100);
		cnt++;
	}

	return ret;
}

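/**
 * s2io_link_test - verifies the link state of the nic.
 * @sp: pointer to the s2io_nic structure
 * @data: output variable, set to 0 if the link is up and 1 otherwise
 *
 * Return: the value stored in *data.
 */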
static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->adapter_status);
	if (!(LINK_IS_UP(val64)))
		*data = 1;
	else
		*data = 0;

	return *data;
}

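/**
 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp: pointer to the s2io_nic structure
 * @data: output variable that holds the test result
 *
 * Verifies read and write access to the RLDRAM chip on the NIC by
 * writing test patterns and reading them back.
 *
 * Return: 0 on success, 1 on failure.
 */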
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		if (cnt == 5)
			break;

		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}

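/**
 * s2io_ethtool_test - conducts the self tests to determine card health.
 * @dev: pointer to netdev
 * @ethtest: ethtool structure describing the requested test mode
 * @data: per-test results: register, EEPROM, link, RLDRAM and BIST
 *
 * Runs four offline tests (register, EEPROM, RLDRAM and BIST) or the
 * single online test (link), depending on the requested flags.
 */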
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests */
		if (orig_state)
			s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		data[2] = 0;
	} else {
		/* Online Tests */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}

static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 *tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;
	struct xpakStat *xstats = &stats->xpak_stat;

	s2io_updt_stats(sp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stats->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
		le32_to_cpu(stats->tmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stats->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stats->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

	/* Enhanced statistics exist only for Xframe II */
	if (sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
	}

	tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->single_ecc_errs;
	tmp_stats[i++] = swstats->double_ecc_errs;
	tmp_stats[i++] = swstats->parity_err_cnt;
	tmp_stats[i++] = swstats->serious_err_cnt;
	tmp_stats[i++] = swstats->soft_reset_cnt;
	tmp_stats[i++] = swstats->fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = swstats->ring_full_cnt[k];
	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
	tmp_stats[i++] = xstats->warn_laser_output_power_high;
	tmp_stats[i++] = xstats->warn_laser_output_power_low;
	tmp_stats[i++] = swstats->clubbed_frms_cnt;
	tmp_stats[i++] = swstats->sending_both;
	tmp_stats[i++] = swstats->outof_sequence_pkts;
	tmp_stats[i++] = swstats->flush_max_pkts;
	if (swstats->num_aggregations) {
		u64 tmp = swstats->sum_avg_pkts_aggregated;
		int count = 0;

		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= swstats->num_aggregations) {
			tmp -= swstats->num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	} else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
	tmp_stats[i++] = swstats->pci_map_fail_cnt;
	tmp_stats[i++] = swstats->watchdog_timer_cnt;
	tmp_stats[i++] = swstats->mem_allocated;
	tmp_stats[i++] = swstats->mem_freed;
	tmp_stats[i++] = swstats->link_up_cnt;
	tmp_stats[i++] = swstats->link_down_cnt;
	tmp_stats[i++] = swstats->link_up_time;
	tmp_stats[i++] = swstats->link_down_time;

	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
	tmp_stats[i++] = swstats->tx_parity_err_cnt;
	tmp_stats[i++] = swstats->tx_link_loss_cnt;
	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

	tmp_stats[i++] = swstats->rx_parity_err_cnt;
	tmp_stats[i++] = swstats->rx_abort_cnt;
	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
	tmp_stats[i++] = swstats->tda_err_cnt;
	tmp_stats[i++] = swstats->pfc_err_cnt;
	tmp_stats[i++] = swstats->pcc_err_cnt;
	tmp_stats[i++] = swstats->tti_err_cnt;
	tmp_stats[i++] = swstats->tpa_err_cnt;
	tmp_stats[i++] = swstats->sm_err_cnt;
	tmp_stats[i++] = swstats->lso_err_cnt;
	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = swstats->rc_err_cnt;
	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
	tmp_stats[i++] = swstats->rpa_err_cnt;
	tmp_stats[i++] = swstats->rda_err_cnt;
	tmp_stats[i++] = swstats->rti_err_cnt;
	tmp_stats[i++] = swstats->mc_err_cnt;
}

static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
	return XENA_REG_SPACE;
}

static int s2io_get_eeprom_len(struct net_device *dev)
{
	return XENA_EEPROM_SPACE;
}

static int s2io_get_sset_count(struct net_device *dev, int sset)
{
	struct s2io_nic *sp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_TEST:
		return S2IO_TEST_LEN;
	case ETH_SS_STATS:
		switch (sp->device_type) {
		case XFRAME_I_DEVICE:
			return XFRAME_I_STAT_LEN;
		case XFRAME_II_DEVICE:
			return XFRAME_II_STAT_LEN;
		default:
			return 0;
		}
	default:
		return -EOPNOTSUPP;
	}
}

static void s2io_ethtool_get_strings(struct net_device *dev,
				     u32 stringset, u8 *data)
{
	int stat_size = 0;
	struct s2io_nic *sp = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
		break;
	case ETH_SS_STATS:
		stat_size = sizeof(ethtool_xena_stats_keys);
		memcpy(data, &ethtool_xena_stats_keys, stat_size);
		if (sp->device_type == XFRAME_II_DEVICE) {
			memcpy(data + stat_size,
			       &ethtool_enhanced_stats_keys,
			       sizeof(ethtool_enhanced_stats_keys));
			stat_size += sizeof(ethtool_enhanced_stats_keys);
		}

		memcpy(data + stat_size, &ethtool_driver_stats_keys,
		       sizeof(ethtool_driver_stats_keys));
	}
}

static int s2io_set_features(struct net_device *dev, netdev_features_t features)
{
	struct s2io_nic *sp = netdev_priv(dev);
	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;

	if (changed && netif_running(dev)) {
		int rc;

		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		dev->features = features;
		rc = s2io_card_up(sp);
		if (rc)
			s2io_reset(sp);
		else
			s2io_start_all_tx_queue(sp);

		return rc ? rc : 1;
	}

	return 0;
}

6594static const struct ethtool_ops netdev_ethtool_ops = {
6595 .get_drvinfo = s2io_ethtool_gdrvinfo,
6596 .get_regs_len = s2io_ethtool_get_regs_len,
6597 .get_regs = s2io_ethtool_gregs,
6598 .get_link = ethtool_op_get_link,
6599 .get_eeprom_len = s2io_get_eeprom_len,
6600 .get_eeprom = s2io_ethtool_geeprom,
6601 .set_eeprom = s2io_ethtool_seeprom,
6602 .get_ringparam = s2io_ethtool_gringparam,
6603 .get_pauseparam = s2io_ethtool_getpause_data,
6604 .set_pauseparam = s2io_ethtool_setpause_data,
6605 .self_test = s2io_ethtool_test,
6606 .get_strings = s2io_ethtool_get_strings,
6607 .set_phys_id = s2io_ethtool_set_led,
6608 .get_ethtool_stats = s2io_get_ethtool_stats,
6609 .get_sset_count = s2io_get_sset_count,
6610 .get_link_ksettings = s2io_ethtool_get_link_ksettings,
6611 .set_link_ksettings = s2io_ethtool_set_link_ksettings,
6612};
6613
6614
6615
6616
6617
6618
6619
6620
6621
6622
6623
6624
6625
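/**
 *  s2io_ioctl - Entry point for the Ioctl
 *  @dev : Device pointer.
 *  @rq : An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd : This is used to distinguish between the different commands that
 *  can be passed to the IOCTL functions.
 *  Description:
 *  No special functionality is supported through IOCTL, hence this
 *  function always returns -EOPNOTSUPP.
 */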
6626static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6627{
6628 return -EOPNOTSUPP;
6629}
6630
6631
6632
6633
6634
6635
6636
6637
6638
6639
6640
6641
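/**
 *  s2io_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: A driver entry point to change MTU size for the device.
 *  If the interface is running, the card is brought down and back up to
 *  re-initialize the rings for the new MTU; otherwise only the MAC's
 *  maximum payload length register is updated.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */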
6642static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6643{
6644 struct s2io_nic *sp = netdev_priv(dev);
6645 int ret = 0;
6646
6647 dev->mtu = new_mtu;
6648 if (netif_running(dev)) {
6649 s2io_stop_all_tx_queue(sp);
6650 s2io_card_down(sp);
6651 ret = s2io_card_up(sp);
6652 if (ret) {
6653 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6654 __func__);
6655 return ret;
6656 }
6657 s2io_wake_all_tx_queue(sp);
6658 } else {
6659 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6660 u64 val64 = new_mtu;
6661
6662 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6663 }
6664
6665 return ret;
6666}
6667
6668
6669
6670
6671
6672
6673
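/**
 *  s2io_set_link - Set the link status
 *  @work : work struct containing a pointer to the device private structure
 *  Description:
 *  Sets the link status for the adapter: enables the adapter and the LED
 *  when the link comes up, and turns the LED off and reports link down
 *  otherwise.
 */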
6674static void s2io_set_link(struct work_struct *work)
6675{
6676 struct s2io_nic *nic = container_of(work, struct s2io_nic,
6677 set_link_task);
6678 struct net_device *dev = nic->dev;
6679 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6680 register u64 val64;
6681 u16 subid;
6682
6683 rtnl_lock();
6684
6685 if (!netif_running(dev))
6686 goto out_unlock;
6687
6688 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6689
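		/* The card is being reset, no point doing anything */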
6690 goto out_unlock;
6691 }
6692
6693 subid = nic->pdev->subsystem_device;
6694 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6695
6696
6697
6698
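		/*
		 * Allow a small delay for the NIC's self initiated
		 * cleanup to complete before sampling the adapter status.
		 */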
6699 msleep(100);
6700 }
6701
6702 val64 = readq(&bar0->adapter_status);
6703 if (LINK_IS_UP(val64)) {
6704 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6705 if (verify_xena_quiescence(nic)) {
6706 val64 = readq(&bar0->adapter_control);
6707 val64 |= ADAPTER_CNTL_EN;
6708 writeq(val64, &bar0->adapter_control);
6709 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6710 nic->device_type, subid)) {
6711 val64 = readq(&bar0->gpio_control);
6712 val64 |= GPIO_CTRL_GPIO_0;
6713 writeq(val64, &bar0->gpio_control);
6714 val64 = readq(&bar0->gpio_control);
6715 } else {
6716 val64 |= ADAPTER_LED_ON;
6717 writeq(val64, &bar0->adapter_control);
6718 }
6719 nic->device_enabled_once = true;
6720 } else {
6721 DBG_PRINT(ERR_DBG,
6722 "%s: Error: device is not Quiescent\n",
6723 dev->name);
6724 s2io_stop_all_tx_queue(nic);
6725 }
6726 }
6727 val64 = readq(&bar0->adapter_control);
6728 val64 |= ADAPTER_LED_ON;
6729 writeq(val64, &bar0->adapter_control);
6730 s2io_link(nic, LINK_UP);
6731 } else {
6732 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6733 subid)) {
6734 val64 = readq(&bar0->gpio_control);
6735 val64 &= ~GPIO_CTRL_GPIO_0;
6736 writeq(val64, &bar0->gpio_control);
6737 val64 = readq(&bar0->gpio_control);
6738 }
6739
6740 val64 = readq(&bar0->adapter_control);
6741 val64 = val64 & (~ADAPTER_LED_ON);
6742 writeq(val64, &bar0->adapter_control);
6743 s2io_link(nic, LINK_DOWN);
6744 }
6745 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6746
6747out_unlock:
6748 rtnl_unlock();
6749}
6750
6751static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6752 struct buffAdd *ba,
6753 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6754 u64 *temp2, int size)
6755{
6756 struct net_device *dev = sp->dev;
6757 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6758
6759 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6760 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6761
6762 if (*skb) {
6763 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6764
6765
6766
6767
6768
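			/*
			 * As the Rx frame is not going to be processed,
			 * reuse the previously mapped address held in
			 * *temp0 as the descriptor's buffer pointer.
			 */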
6769 rxdp1->Buffer0_ptr = *temp0;
6770 } else {
6771 *skb = netdev_alloc_skb(dev, size);
6772 if (!(*skb)) {
6773 DBG_PRINT(INFO_DBG,
6774 "%s: Out of memory to allocate %s\n",
6775 dev->name, "1 buf mode SKBs");
6776 stats->mem_alloc_fail_cnt++;
6777				return -ENOMEM;
6778 }
6779 stats->mem_allocated += (*skb)->truesize;
6780
6781
6782
6783
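			/*
			 * Save the mapped address in *temp0 so it can be
			 * reused for the next rxd whose Host_Control is NULL.
			 */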
6784 rxdp1->Buffer0_ptr = *temp0 =
6785 pci_map_single(sp->pdev, (*skb)->data,
6786 size - NET_IP_ALIGN,
6787 PCI_DMA_FROMDEVICE);
6788 if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
6789 goto memalloc_failed;
6790 rxdp->Host_Control = (unsigned long) (*skb);
6791 }
6792 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6793 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6794
6795 if (*skb) {
6796 rxdp3->Buffer2_ptr = *temp2;
6797 rxdp3->Buffer0_ptr = *temp0;
6798 rxdp3->Buffer1_ptr = *temp1;
6799 } else {
6800 *skb = netdev_alloc_skb(dev, size);
6801 if (!(*skb)) {
6802 DBG_PRINT(INFO_DBG,
6803 "%s: Out of memory to allocate %s\n",
6804 dev->name,
6805 "2 buf mode SKBs");
6806 stats->mem_alloc_fail_cnt++;
6807 return -ENOMEM;
6808 }
6809 stats->mem_allocated += (*skb)->truesize;
6810 rxdp3->Buffer2_ptr = *temp2 =
6811 pci_map_single(sp->pdev, (*skb)->data,
6812 dev->mtu + 4,
6813 PCI_DMA_FROMDEVICE);
6814 if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
6815 goto memalloc_failed;
6816 rxdp3->Buffer0_ptr = *temp0 =
6817 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6818 PCI_DMA_FROMDEVICE);
6819 if (pci_dma_mapping_error(sp->pdev,
6820 rxdp3->Buffer0_ptr)) {
6821 pci_unmap_single(sp->pdev,
6822 (dma_addr_t)rxdp3->Buffer2_ptr,
6823 dev->mtu + 4,
6824 PCI_DMA_FROMDEVICE);
6825 goto memalloc_failed;
6826 }
6827 rxdp->Host_Control = (unsigned long) (*skb);
6828
6829
6830 rxdp3->Buffer1_ptr = *temp1 =
6831 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6832 PCI_DMA_FROMDEVICE);
6833 if (pci_dma_mapping_error(sp->pdev,
6834 rxdp3->Buffer1_ptr)) {
6835 pci_unmap_single(sp->pdev,
6836 (dma_addr_t)rxdp3->Buffer0_ptr,
6837 BUF0_LEN, PCI_DMA_FROMDEVICE);
6838 pci_unmap_single(sp->pdev,
6839 (dma_addr_t)rxdp3->Buffer2_ptr,
6840 dev->mtu + 4,
6841 PCI_DMA_FROMDEVICE);
6842 goto memalloc_failed;
6843 }
6844 }
6845 }
6846 return 0;
6847
6848memalloc_failed:
6849 stats->pci_map_fail_cnt++;
6850 stats->mem_freed += (*skb)->truesize;
6851 dev_kfree_skb(*skb);
6852 return -ENOMEM;
6853}
6854
6855static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6856 int size)
6857{
6858 struct net_device *dev = sp->dev;
6859 if (sp->rxd_mode == RXD_MODE_1) {
6860 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6861 } else if (sp->rxd_mode == RXD_MODE_3B) {
6862 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6863 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6864 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6865 }
6866}
6867
6868static int rxd_owner_bit_reset(struct s2io_nic *sp)
6869{
6870 int i, j, k, blk_cnt = 0, size;
6871 struct config_param *config = &sp->config;
6872 struct mac_info *mac_control = &sp->mac_control;
6873 struct net_device *dev = sp->dev;
6874 struct RxD_t *rxdp = NULL;
6875 struct sk_buff *skb = NULL;
6876 struct buffAdd *ba = NULL;
6877 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6878
6879
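	/* Calculate the size based on ring mode */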
6880 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6881 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6882 if (sp->rxd_mode == RXD_MODE_1)
6883 size += NET_IP_ALIGN;
6884 else if (sp->rxd_mode == RXD_MODE_3B)
6885 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6886
6887 for (i = 0; i < config->rx_ring_num; i++) {
6888 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6889 struct ring_info *ring = &mac_control->rings[i];
6890
6891 blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6892
6893 for (j = 0; j < blk_cnt; j++) {
6894 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6895 rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6896 if (sp->rxd_mode == RXD_MODE_3B)
6897 ba = &ring->ba[j][k];
6898 if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6899 &temp0_64,
6900 &temp1_64,
6901 &temp2_64,
6902 size) == -ENOMEM) {
6903 return 0;
6904 }
6905
6906 set_rxd_buffer_size(sp, rxdp, size);
6907 dma_wmb();
6908
6909 rxdp->Control_1 |= RXD_OWN_XENA;
6910 }
6911 }
6912 }
6913 return 0;
6914
6915}
6916
6917static int s2io_add_isr(struct s2io_nic *sp)
6918{
6919 int ret = 0;
6920 struct net_device *dev = sp->dev;
6921 int err = 0;
6922
6923 if (sp->config.intr_type == MSI_X)
6924 ret = s2io_enable_msi_x(sp);
6925 if (ret) {
6926 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6927 sp->config.intr_type = INTA;
6928 }
6929
6930
6931
6932
6933
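	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure.
	 */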
6934 store_xmsi_data(sp);
6935
6936
6937 if (sp->config.intr_type == MSI_X) {
6938 int i, msix_rx_cnt = 0;
6939
6940 for (i = 0; i < sp->num_entries; i++) {
6941 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
6942 if (sp->s2io_entries[i].type ==
6943 MSIX_RING_TYPE) {
6944 snprintf(sp->desc[i],
6945 sizeof(sp->desc[i]),
6946 "%s:MSI-X-%d-RX",
6947 dev->name, i);
6948 err = request_irq(sp->entries[i].vector,
6949 s2io_msix_ring_handle,
6950 0,
6951 sp->desc[i],
6952 sp->s2io_entries[i].arg);
6953 } else if (sp->s2io_entries[i].type ==
6954 MSIX_ALARM_TYPE) {
6955 snprintf(sp->desc[i],
6956 sizeof(sp->desc[i]),
6957 "%s:MSI-X-%d-TX",
6958 dev->name, i);
6959 err = request_irq(sp->entries[i].vector,
6960 s2io_msix_fifo_handle,
6961 0,
6962 sp->desc[i],
6963 sp->s2io_entries[i].arg);
6964
6965 }
6966
6967 if (!(sp->msix_info[i].addr &&
6968 sp->msix_info[i].data)) {
6969 DBG_PRINT(ERR_DBG,
6970 "%s @Addr:0x%llx Data:0x%llx\n",
6971 sp->desc[i],
6972 (unsigned long long)
6973 sp->msix_info[i].addr,
6974 (unsigned long long)
6975 ntohl(sp->msix_info[i].data));
6976 } else
6977 msix_rx_cnt++;
6978 if (err) {
6979 remove_msix_isr(sp);
6980
6981 DBG_PRINT(ERR_DBG,
6982 "%s:MSI-X-%d registration "
6983 "failed\n", dev->name, i);
6984
6985 DBG_PRINT(ERR_DBG,
6986 "%s: Defaulting to INTA\n",
6987 dev->name);
6988 sp->config.intr_type = INTA;
6989 break;
6990 }
6991 sp->s2io_entries[i].in_use =
6992 MSIX_REGISTERED_SUCCESS;
6993 }
6994 }
6995 if (!err) {
6996 pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
6997 DBG_PRINT(INFO_DBG,
6998 "MSI-X-TX entries enabled through alarm vector\n");
6999 }
7000 }
7001 if (sp->config.intr_type == INTA) {
7002 err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
7003 sp->name, dev);
7004 if (err) {
7005 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7006 dev->name);
7007 return -1;
7008 }
7009 }
7010 return 0;
7011}
7012
7013static void s2io_rem_isr(struct s2io_nic *sp)
7014{
7015 if (sp->config.intr_type == MSI_X)
7016 remove_msix_isr(sp);
7017 else
7018 remove_inta_isr(sp);
7019}
7020
7021static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7022{
7023 int cnt = 0;
7024 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7025 register u64 val64 = 0;
7026 struct config_param *config;
7027 config = &sp->config;
7028
7029 if (!is_s2io_card_up(sp))
7030 return;
7031
7032 del_timer_sync(&sp->alarm_timer);
7033
7034 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7035 msleep(50);
7036 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7037
7038
7039 if (sp->config.napi) {
7040 int off = 0;
7041 if (config->intr_type == MSI_X) {
7042 for (; off < sp->config.rx_ring_num; off++)
7043 napi_disable(&sp->mac_control.rings[off].napi);
7044			} else {
7045				napi_disable(&sp->napi);
7046			}
7047 }
7048
7049
7050 if (do_io)
7051 stop_nic(sp);
7052
7053 s2io_rem_isr(sp);
7054
7055
7056 s2io_link(sp, LINK_DOWN);
7057
7058
7059 while (do_io) {
7060
7061
7062
7063
7064
7065
7066
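		/*
		 * As per the HW requirement we need to replenish the
		 * receive buffers to avoid a ring bump. Since there is
		 * no intention of processing the Rx frames at this point,
		 * just set the ownership bit of the rxds in each Rx ring
		 * to HW and program the appropriate buffer size based on
		 * the ring mode.
		 */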
7067 rxd_owner_bit_reset(sp);
7068
7069 val64 = readq(&bar0->adapter_status);
7070 if (verify_xena_quiescence(sp)) {
7071 if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7072 break;
7073 }
7074
7075 msleep(50);
7076 cnt++;
7077 if (cnt == 10) {
7078 DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7079 "adapter status reads 0x%llx\n",
7080 (unsigned long long)val64);
7081 break;
7082 }
7083 }
7084 if (do_io)
7085 s2io_reset(sp);
7086
7087
7088 free_tx_buffers(sp);
7089
7090
7091 free_rx_buffers(sp);
7092
7093 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7094}
7095
7096static void s2io_card_down(struct s2io_nic *sp)
7097{
7098 do_s2io_card_down(sp, 1);
7099}
7100
7101static int s2io_card_up(struct s2io_nic *sp)
7102{
7103 int i, ret = 0;
7104 struct config_param *config;
7105 struct mac_info *mac_control;
7106 struct net_device *dev = sp->dev;
7107 u16 interruptible;
7108
7109
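	/* Initialize the H/W I/O registers */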
7110 ret = init_nic(sp);
7111 if (ret != 0) {
7112 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7113 dev->name);
7114 if (ret != -EIO)
7115 s2io_reset(sp);
7116 return ret;
7117 }
7118
7119
7120
7121
7122
7123 config = &sp->config;
7124 mac_control = &sp->mac_control;
7125
7126 for (i = 0; i < config->rx_ring_num; i++) {
7127 struct ring_info *ring = &mac_control->rings[i];
7128
7129 ring->mtu = dev->mtu;
7130 ring->lro = !!(dev->features & NETIF_F_LRO);
7131 ret = fill_rx_buffers(sp, ring, 1);
7132 if (ret) {
7133 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7134 dev->name);
7135 s2io_reset(sp);
7136 free_rx_buffers(sp);
7137 return -ENOMEM;
7138 }
7139		DBG_PRINT(INFO_DBG, "Buffers in ring %d: %d\n", i,
7140			  ring->rx_bufs_left);
7141 }
7142
7143
7144 if (config->napi) {
7145 if (config->intr_type == MSI_X) {
7146 for (i = 0; i < sp->config.rx_ring_num; i++)
7147 napi_enable(&sp->mac_control.rings[i].napi);
7148 } else {
7149 napi_enable(&sp->napi);
7150 }
7151 }
7152
7153
7154 if (sp->promisc_flg)
7155 sp->promisc_flg = 0;
7156 if (sp->m_cast_flg) {
7157 sp->m_cast_flg = 0;
7158 sp->all_multi_pos = 0;
7159 }
7160
7161
7162 s2io_set_multicast(dev);
7163
7164 if (dev->features & NETIF_F_LRO) {
7165
7166 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7167
7168 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7169 sp->lro_max_aggr_per_sess = lro_max_pkts;
7170 }
7171
7172
7173 if (start_nic(sp)) {
7174 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7175 s2io_reset(sp);
7176 free_rx_buffers(sp);
7177 return -ENODEV;
7178 }
7179
7180
7181 if (s2io_add_isr(sp) != 0) {
7182 if (sp->config.intr_type == MSI_X)
7183 s2io_rem_isr(sp);
7184 s2io_reset(sp);
7185 free_rx_buffers(sp);
7186 return -ENODEV;
7187 }
7188
7189 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7190
7191 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7192
7193
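	/* Enable select interrupts */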
7194 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7195 if (sp->config.intr_type != INTA) {
7196 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7197 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7198 } else {
7199 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7200 interruptible |= TX_PIC_INTR;
7201 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7202 }
7203
7204 return 0;
7205}
7206
7207
7208
7209
7210
7211
7212
7213
7214
7215
7216
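/**
 *  s2io_restart_nic - Resets the NIC.
 *  @work : work struct containing a pointer to the device private structure
 *  Description:
 *  This function is scheduled to be run by the s2io_tx_watchdog
 *  function after a Tx timeout. It brings the card down and back up
 *  under rtnl_lock to recover the hardware outside interrupt context.
 */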
7217static void s2io_restart_nic(struct work_struct *work)
7218{
7219 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7220 struct net_device *dev = sp->dev;
7221
7222 rtnl_lock();
7223
7224 if (!netif_running(dev))
7225 goto out_unlock;
7226
7227 s2io_card_down(sp);
7228 if (s2io_card_up(sp)) {
7229 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7230 }
7231 s2io_wake_all_tx_queue(sp);
7232 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7233out_unlock:
7234 rtnl_unlock();
7235}
7236
7237
7238
7239
7240
7241
7242
7243
7244
7245
7246
7247
7248
7249
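/**
 *  s2io_tx_watchdog - Watchdog for transmit side.
 *  @dev : Pointer to net device structure
 *  Description:
 *  This function is triggered if the Tx Queue is stopped
 *  for a pre-defined amount of time when the Interface is still up.
 *  If the Interface is jammed in such a situation, the hardware is
 *  reset by scheduling s2io_restart_nic, to overcome any problem that
 *  might have been caused in the hardware.
 *  Return value:
 *  void
 */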
7250static void s2io_tx_watchdog(struct net_device *dev)
7251{
7252 struct s2io_nic *sp = netdev_priv(dev);
7253 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7254
7255 if (netif_carrier_ok(dev)) {
7256 swstats->watchdog_timer_cnt++;
7257 schedule_work(&sp->rst_timer_task);
7258 swstats->soft_reset_cnt++;
7259 }
7260}
7261
7262
7263
7264
7265
7266
7267
7268
7269
7270
7271
7272
7273
7274
7275
7276
7277
7278
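/**
 *  rx_osm_handler - To perform some receive operations on the received SKB.
 *  @ring_data : the ring from which this RxD was extracted.
 *  @rxdp : descriptor of the received frame.
 *  Description:
 *  This function is called by the Rx interrupt service routine to perform
 *  some OS related operations on the SKB before passing it to the upper
 *  layers. It mainly checks if the checksum is OK; if so, the SKB is marked
 *  CHECKSUM_UNNECESSARY and possibly aggregated through LRO before being
 *  passed up, otherwise it is handed to the stack unmodified.
 *  Return value:
 *  SUCCESS on success.
 */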
7279static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7280{
7281 struct s2io_nic *sp = ring_data->nic;
7282 struct net_device *dev = ring_data->dev;
7283 struct sk_buff *skb = (struct sk_buff *)
7284 ((unsigned long)rxdp->Host_Control);
7285 int ring_no = ring_data->ring_no;
7286 u16 l3_csum, l4_csum;
7287 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7288 struct lro *uninitialized_var(lro);
7289 u8 err_mask;
7290 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7291
7292 skb->dev = dev;
7293
7294 if (err) {
7295
7296 if (err & 0x1)
7297 swstats->parity_err_cnt++;
7298
7299 err_mask = err >> 48;
7300 switch (err_mask) {
7301 case 1:
7302 swstats->rx_parity_err_cnt++;
7303 break;
7304
7305 case 2:
7306 swstats->rx_abort_cnt++;
7307 break;
7308
7309 case 3:
7310 swstats->rx_parity_abort_cnt++;
7311 break;
7312
7313 case 4:
7314 swstats->rx_rda_fail_cnt++;
7315 break;
7316
7317 case 5:
7318 swstats->rx_unkn_prot_cnt++;
7319 break;
7320
7321 case 6:
7322 swstats->rx_fcs_err_cnt++;
7323 break;
7324
7325 case 7:
7326 swstats->rx_buf_size_err_cnt++;
7327 break;
7328
7329 case 8:
7330 swstats->rx_rxd_corrupt_cnt++;
7331 break;
7332
7333 case 15:
7334 swstats->rx_unkn_err_cnt++;
7335 break;
7336 }
7337
7338
7339
7340
7341
7342
7343
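		/*
		 * Drop the packet if the transfer code indicates an error.
		 * Exception: code 0x5 (unknown protocol), which can be due
		 * to an unsupported IPv6 extension header; in that case let
		 * the stack handle the packet - since the checksum will be
		 * incorrect, the stack will validate it anyway.
		 */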
7344 if (err_mask != 0x5) {
7345 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7346 dev->name, err_mask);
7347 dev->stats.rx_crc_errors++;
7348 swstats->mem_freed
7349 += skb->truesize;
7350 dev_kfree_skb(skb);
7351 ring_data->rx_bufs_left -= 1;
7352 rxdp->Host_Control = 0;
7353 return 0;
7354 }
7355 }
7356
7357 rxdp->Host_Control = 0;
7358 if (sp->rxd_mode == RXD_MODE_1) {
7359 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7360
7361 skb_put(skb, len);
7362 } else if (sp->rxd_mode == RXD_MODE_3B) {
7363 int get_block = ring_data->rx_curr_get_info.block_index;
7364 int get_off = ring_data->rx_curr_get_info.offset;
7365 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7366 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7367 unsigned char *buff = skb_push(skb, buf0_len);
7368
7369 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7370 memcpy(buff, ba->ba_0, buf0_len);
7371 skb_put(skb, buf2_len);
7372 }
7373
7374 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7375 ((!ring_data->lro) ||
7376 (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7377 (dev->features & NETIF_F_RXCSUM)) {
7378 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7379 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7380 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7381
7382
7383
7384
7385
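			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */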
7386 skb->ip_summed = CHECKSUM_UNNECESSARY;
7387 if (ring_data->lro) {
7388 u32 tcp_len = 0;
7389 u8 *tcp;
7390 int ret = 0;
7391
7392 ret = s2io_club_tcp_session(ring_data,
7393 skb->data, &tcp,
7394 &tcp_len, &lro,
7395 rxdp, sp);
7396 switch (ret) {
7397 case 3:
7398 lro->parent = skb;
7399 goto aggregate;
7400 case 1:
7401 lro_append_pkt(sp, lro, skb, tcp_len);
7402 goto aggregate;
7403 case 4:
7404 lro_append_pkt(sp, lro, skb, tcp_len);
7405 queue_rx_frame(lro->parent,
7406 lro->vlan_tag);
7407 clear_lro_session(lro);
7408 swstats->flush_max_pkts++;
7409 goto aggregate;
7410 case 2:
7411 lro->parent->data_len = lro->frags_len;
7412 swstats->sending_both++;
7413 queue_rx_frame(lro->parent,
7414 lro->vlan_tag);
7415 clear_lro_session(lro);
7416 goto send_up;
7417 case 0:
7418 case -1:
7419 case 5:
7420
7421
7422
7423 break;
7424 default:
7425 DBG_PRINT(ERR_DBG,
7426 "%s: Samadhana!!\n",
7427 __func__);
7428 BUG();
7429 }
7430 }
7431 } else {
7432
7433
7434
7435
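			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */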
7436 skb_checksum_none_assert(skb);
7437 }
7438 } else
7439 skb_checksum_none_assert(skb);
7440
7441 swstats->mem_freed += skb->truesize;
7442send_up:
7443 skb_record_rx_queue(skb, ring_no);
7444 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7445aggregate:
7446 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7447 return SUCCESS;
7448}
7449
7450
7451
7452
7453
7454
7455
7456
7457
7458
7459
7460
7461
7462
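/**
 *  s2io_link - stops/starts the Tx queue.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @link : indicates whether link is UP or DOWN.
 *  Description:
 *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
 *  interrupt handler whenever a link change interrupt comes up.
 *  Return value:
 *  void.
 */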
7463static void s2io_link(struct s2io_nic *sp, int link)
7464{
7465 struct net_device *dev = sp->dev;
7466 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7467
7468 if (link != sp->last_link_state) {
7469 init_tti(sp, link);
7470 if (link == LINK_DOWN) {
7471 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7472 s2io_stop_all_tx_queue(sp);
7473 netif_carrier_off(dev);
7474 if (swstats->link_up_cnt)
7475 swstats->link_up_time =
7476 jiffies - sp->start_time;
7477 swstats->link_down_cnt++;
7478 } else {
7479 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7480 if (swstats->link_down_cnt)
7481 swstats->link_down_time =
7482 jiffies - sp->start_time;
7483 swstats->link_up_cnt++;
7484 netif_carrier_on(dev);
7485 s2io_wake_all_tx_queue(sp);
7486 }
7487 }
7488 sp->last_link_state = link;
7489 sp->start_time = jiffies;
7490}
7491
7492
7493
7494
7495
7496
7497
7498
7499
7500
7501
7502
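/**
 *  s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  Description:
 *  This function initializes a few of the PCI and PCI-X configuration
 *  registers with recommended values: data parity error reporting is
 *  enabled and the PCI-X command register is set up.
 *  Return value:
 *  void.
 */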
7503static void s2io_init_pci(struct s2io_nic *sp)
7504{
7505 u16 pci_cmd = 0, pcix_cmd = 0;
7506
7507
7508 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7509 &(pcix_cmd));
7510 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7511 (pcix_cmd | 1));
7512 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7513 &(pcix_cmd));
7514
7515
7516 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7517 pci_write_config_word(sp->pdev, PCI_COMMAND,
7518 (pci_cmd | PCI_COMMAND_PARITY));
7519 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7520}
7521
7522static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7523 u8 *dev_multiq)
7524{
7525 int i;
7526
7527 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7528 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7529 "(%d) not supported\n", tx_fifo_num);
7530
7531 if (tx_fifo_num < 1)
7532 tx_fifo_num = 1;
7533 else
7534 tx_fifo_num = MAX_TX_FIFOS;
7535
7536 DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7537 }
7538
7539 if (multiq)
7540 *dev_multiq = multiq;
7541
7542 if (tx_steering_type && (1 == tx_fifo_num)) {
7543 if (tx_steering_type != TX_DEFAULT_STEERING)
7544 DBG_PRINT(ERR_DBG,
7545 "Tx steering is not supported with "
7546 "one fifo. Disabling Tx steering.\n");
7547 tx_steering_type = NO_STEERING;
7548 }
7549
7550 if ((tx_steering_type < NO_STEERING) ||
7551 (tx_steering_type > TX_DEFAULT_STEERING)) {
7552 DBG_PRINT(ERR_DBG,
7553 "Requested transmit steering not supported\n");
7554 DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7555 tx_steering_type = NO_STEERING;
7556 }
7557
7558 if (rx_ring_num > MAX_RX_RINGS) {
7559 DBG_PRINT(ERR_DBG,
7560 "Requested number of rx rings not supported\n");
7561 DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7562 MAX_RX_RINGS);
7563 rx_ring_num = MAX_RX_RINGS;
7564 }
7565
7566 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7567 DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7568 "Defaulting to INTA\n");
7569 *dev_intr_type = INTA;
7570 }
7571
7572 if ((*dev_intr_type == MSI_X) &&
7573 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7574 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7575 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7576 "Defaulting to INTA\n");
7577 *dev_intr_type = INTA;
7578 }
7579
7580 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7581 DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7582 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7583 rx_ring_mode = 1;
7584 }
7585
7586 for (i = 0; i < MAX_RX_RINGS; i++)
7587 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7588 DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7589 "supported\nDefaulting to %d\n",
7590 MAX_RX_BLOCKS_PER_RING);
7591 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7592 }
7593
7594 return SUCCESS;
7595}
7596
7597
7598
7599
7600
7601
7602
7603
7604
7605
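/**
 *  rts_ds_steer - Receive traffic steering based on IPv4 TOS or IPv6
 *  Traffic class, respectively.
 *  @nic : device private variable.
 *  @ds_codepoint : the DS codepoint (0-63) to match on.
 *  @ring : the receive ring to which matching traffic is steered.
 *  Description:
 *  The function configures the receive steering to the desired
 *  receive ring.
 *  Return value:
 *  SUCCESS on success and FAILURE on an out-of-range codepoint or a
 *  command timeout.
 */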
7606static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7607{
7608 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7609 register u64 val64 = 0;
7610
7611 if (ds_codepoint > 63)
7612 return FAILURE;
7613
7614 val64 = RTS_DS_MEM_DATA(ring);
7615 writeq(val64, &bar0->rts_ds_mem_data);
7616
7617 val64 = RTS_DS_MEM_CTRL_WE |
7618 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7619 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7620
7621 writeq(val64, &bar0->rts_ds_mem_ctrl);
7622
7623 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7624 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7625 S2IO_BIT_RESET);
7626}
7627
7628static const struct net_device_ops s2io_netdev_ops = {
7629 .ndo_open = s2io_open,
7630 .ndo_stop = s2io_close,
7631 .ndo_get_stats = s2io_get_stats,
7632 .ndo_start_xmit = s2io_xmit,
7633 .ndo_validate_addr = eth_validate_addr,
7634 .ndo_set_rx_mode = s2io_set_multicast,
7635 .ndo_do_ioctl = s2io_ioctl,
7636 .ndo_set_mac_address = s2io_set_mac_addr,
7637 .ndo_change_mtu = s2io_change_mtu,
7638 .ndo_set_features = s2io_set_features,
7639 .ndo_tx_timeout = s2io_tx_watchdog,
7640#ifdef CONFIG_NET_POLL_CONTROLLER
7641 .ndo_poll_controller = s2io_netpoll,
7642#endif
7643};
7644
7645
7646
7647
7648
7649
7650
7651
7652
7653
7654
7655
7656
7657
7658
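/**
 *  s2io_init_nic - Initialization of the adapter.
 *  @pdev : structure containing the PCI related information of the device.
 *  @pre : the pci_device_id entry that matched this device.
 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
 *  control register is initialized to enable read and write into the I/O
 *  registers of the device.
 *  Return value:
 *  returns 0 on success and negative on failure.
 */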
7659static int
7660s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7661{
7662 struct s2io_nic *sp;
7663 struct net_device *dev;
7664 int i, j, ret;
7665 int dma_flag = false;
7666 u32 mac_up, mac_down;
7667 u64 val64 = 0, tmp64 = 0;
7668 struct XENA_dev_config __iomem *bar0 = NULL;
7669 u16 subid;
7670 struct config_param *config;
7671 struct mac_info *mac_control;
7672 int mode;
7673 u8 dev_intr_type = intr_type;
7674 u8 dev_multiq = 0;
7675
7676 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7677 if (ret)
7678 return ret;
7679
7680 ret = pci_enable_device(pdev);
7681 if (ret) {
7682 DBG_PRINT(ERR_DBG,
7683 "%s: pci_enable_device failed\n", __func__);
7684 return ret;
7685 }
7686
7687 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7688 DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7689 dma_flag = true;
7690 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7691 DBG_PRINT(ERR_DBG,
7692 "Unable to obtain 64bit DMA "
7693 "for consistent allocations\n");
7694 pci_disable_device(pdev);
7695 return -ENOMEM;
7696 }
7697 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7698 DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7699 } else {
7700 pci_disable_device(pdev);
7701 return -ENOMEM;
7702 }
7703 ret = pci_request_regions(pdev, s2io_driver_name);
7704 if (ret) {
7705 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7706 __func__, ret);
7707 pci_disable_device(pdev);
7708 return -ENODEV;
7709 }
7710 if (dev_multiq)
7711 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7712 else
7713 dev = alloc_etherdev(sizeof(struct s2io_nic));
7714 if (dev == NULL) {
7715 pci_disable_device(pdev);
7716 pci_release_regions(pdev);
7717 return -ENODEV;
7718 }
7719
7720 pci_set_master(pdev);
7721 pci_set_drvdata(pdev, dev);
7722 SET_NETDEV_DEV(dev, &pdev->dev);
7723
7724
7725 sp = netdev_priv(dev);
7726 sp->dev = dev;
7727 sp->pdev = pdev;
7728 sp->high_dma_flag = dma_flag;
7729 sp->device_enabled_once = false;
7730 if (rx_ring_mode == 1)
7731 sp->rxd_mode = RXD_MODE_1;
7732 if (rx_ring_mode == 2)
7733 sp->rxd_mode = RXD_MODE_3B;
7734
7735 sp->config.intr_type = dev_intr_type;
7736
7737 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7738 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7739 sp->device_type = XFRAME_II_DEVICE;
7740 else
7741 sp->device_type = XFRAME_I_DEVICE;
7742
7743
7744
7745 s2io_init_pci(sp);
7746
7747
7748
7749
7750
7751
7752
7753
7754 config = &sp->config;
7755 mac_control = &sp->mac_control;
7756
7757 config->napi = napi;
7758 config->tx_steering_type = tx_steering_type;
7759
7760
7761 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7762 config->tx_fifo_num = MAX_TX_FIFOS;
7763 else
7764 config->tx_fifo_num = tx_fifo_num;
7765
7766
7767 if (config->tx_fifo_num < 5) {
7768 if (config->tx_fifo_num == 1)
7769 sp->total_tcp_fifos = 1;
7770 else
7771 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7772 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7773 sp->total_udp_fifos = 1;
7774 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7775 } else {
7776 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7777 FIFO_OTHER_MAX_NUM);
7778 sp->udp_fifo_idx = sp->total_tcp_fifos;
7779 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7780 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7781 }
7782
7783 config->multiq = dev_multiq;
7784 for (i = 0; i < config->tx_fifo_num; i++) {
7785 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7786
7787 tx_cfg->fifo_len = tx_fifo_len[i];
7788 tx_cfg->fifo_priority = i;
7789 }
7790
7791
7792 for (i = 0; i < MAX_TX_FIFOS; i++)
7793 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7794
7795
7796 for (i = 0; i < config->tx_fifo_num; i++)
7797 sp->fifo_selector[i] = fifo_selector[i];
7798
7799
7800 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7801 for (i = 0; i < config->tx_fifo_num; i++) {
7802 struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7803
7804 tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7805 if (tx_cfg->fifo_len < 65) {
7806 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7807 break;
7808 }
7809 }
7810
7811 config->max_txds = MAX_SKB_FRAGS + 2;
7812
7813
7814 config->rx_ring_num = rx_ring_num;
7815 for (i = 0; i < config->rx_ring_num; i++) {
7816 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7817 struct ring_info *ring = &mac_control->rings[i];
7818
7819 rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7820 rx_cfg->ring_priority = i;
7821 ring->rx_bufs_left = 0;
7822 ring->rxd_mode = sp->rxd_mode;
7823 ring->rxd_count = rxd_count[sp->rxd_mode];
7824 ring->pdev = sp->pdev;
7825 ring->dev = sp->dev;
7826 }
7827
7828 for (i = 0; i < rx_ring_num; i++) {
7829 struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7830
7831 rx_cfg->ring_org = RING_ORG_BUFF1;
7832 rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7833 }
7834
7835
7836 mac_control->rmac_pause_time = rmac_pause_time;
7837 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7838 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7839
7840
7841
7842 if (init_shared_mem(sp)) {
7843 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7844 ret = -ENOMEM;
7845 goto mem_alloc_failed;
7846 }
7847
7848 sp->bar0 = pci_ioremap_bar(pdev, 0);
7849 if (!sp->bar0) {
7850 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7851 dev->name);
7852 ret = -ENOMEM;
7853 goto bar0_remap_failed;
7854 }
7855
7856 sp->bar1 = pci_ioremap_bar(pdev, 2);
7857 if (!sp->bar1) {
7858 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7859 dev->name);
7860 ret = -ENOMEM;
7861 goto bar1_remap_failed;
7862 }
7863
7864
7865 for (j = 0; j < MAX_TX_FIFOS; j++) {
7866 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7867 }
7868
7869
7870 dev->netdev_ops = &s2io_netdev_ops;
7871 dev->ethtool_ops = &netdev_ethtool_ops;
7872 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7873 NETIF_F_TSO | NETIF_F_TSO6 |
7874 NETIF_F_RXCSUM | NETIF_F_LRO;
7875 dev->features |= dev->hw_features |
7876 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7877 if (sp->high_dma_flag == true)
7878 dev->features |= NETIF_F_HIGHDMA;
7879 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7880 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7881 INIT_WORK(&sp->set_link_task, s2io_set_link);
7882
7883 pci_save_state(sp->pdev);
7884
7885
7886 if (s2io_set_swapper(sp)) {
7887 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7888 dev->name);
7889 ret = -EAGAIN;
7890 goto set_swap_failed;
7891 }
7892
7893
7894 if (sp->device_type & XFRAME_II_DEVICE) {
7895 mode = s2io_verify_pci_mode(sp);
7896 if (mode < 0) {
7897 DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7898 __func__);
7899 ret = -EBADSLT;
7900 goto set_swap_failed;
7901 }
7902 }
7903
7904 if (sp->config.intr_type == MSI_X) {
7905 sp->num_entries = config->rx_ring_num + 1;
7906 ret = s2io_enable_msi_x(sp);
7907
7908 if (!ret) {
7909 ret = s2io_test_msi(sp);
7910
7911 remove_msix_isr(sp);
7912 }
7913 if (ret) {
7914
7915 DBG_PRINT(ERR_DBG,
7916 "MSI-X requested but failed to enable\n");
7917 sp->config.intr_type = INTA;
7918 }
7919 }
7920
7921 if (config->intr_type == MSI_X) {
7922 for (i = 0; i < config->rx_ring_num ; i++) {
7923 struct ring_info *ring = &mac_control->rings[i];
7924
7925 netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7926 }
7927 } else {
7928 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7929 }
7930
7931
7932 if (sp->device_type & XFRAME_I_DEVICE) {
7933
7934
7935
7936
7937 fix_mac_address(sp);
7938 s2io_reset(sp);
7939 }
7940
7941
7942
7943
7944
7945 bar0 = sp->bar0;
7946 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7947 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7948 writeq(val64, &bar0->rmac_addr_cmd_mem);
7949 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7950 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7951 S2IO_BIT_RESET);
7952 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7953 mac_down = (u32)tmp64;
7954 mac_up = (u32) (tmp64 >> 32);
7955
7956 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7957 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7958 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7959 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7960 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7961 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7962
7963
7964 dev->addr_len = ETH_ALEN;
7965 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7966
7967
7968 if (sp->device_type == XFRAME_I_DEVICE) {
7969 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7970 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7971 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7972 } else if (sp->device_type == XFRAME_II_DEVICE) {
7973 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7974 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7975 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7976 }
7977
7978
7979 dev->min_mtu = MIN_MTU;
7980 dev->max_mtu = S2IO_JUMBO_SIZE;
7981
7982
7983 do_s2io_store_unicast_mc(sp);
7984
7985
7986 if ((sp->device_type == XFRAME_II_DEVICE) &&
7987 (config->intr_type == MSI_X))
7988 sp->num_entries = config->rx_ring_num + 1;
7989
7990
7991 store_xmsi_data(sp);
7992
7993 s2io_reset(sp);
7994
7995
7996
7997
7998
7999 sp->state = 0;
8000
8001
8002 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8003 struct fifo_info *fifo = &mac_control->fifos[i];
8004
8005 spin_lock_init(&fifo->tx_lock);
8006 }
8007
8008
8009
8010
8011
8012 subid = sp->pdev->subsystem_device;
8013 if ((subid & 0xFF) >= 0x07) {
8014 val64 = readq(&bar0->gpio_control);
8015 val64 |= 0x0000800000000000ULL;
8016 writeq(val64, &bar0->gpio_control);
8017 val64 = 0x0411040400000000ULL;
8018 writeq(val64, (void __iomem *)bar0 + 0x2700);
8019 val64 = readq(&bar0->gpio_control);
8020 }
8021
8022 sp->rx_csum = 1;
8023
8024 if (register_netdev(dev)) {
8025 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8026 ret = -ENODEV;
8027 goto register_failed;
8028 }
8029 s2io_vpd_read(sp);
8030 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8031 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8032 sp->product_name, pdev->revision);
8033 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8034 s2io_driver_version);
8035 DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8036 DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8037 if (sp->device_type & XFRAME_II_DEVICE) {
8038 mode = s2io_print_pci_mode(sp);
8039 if (mode < 0) {
8040 ret = -EBADSLT;
8041 unregister_netdev(dev);
8042 goto set_swap_failed;
8043 }
8044 }
8045 switch (sp->rxd_mode) {
8046 case RXD_MODE_1:
8047 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8048 dev->name);
8049 break;
8050 case RXD_MODE_3B:
8051 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8052 dev->name);
8053 break;
8054 }
8055
8056 switch (sp->config.napi) {
8057 case 0:
8058 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8059 break;
8060 case 1:
8061 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8062 break;
8063 }
8064
8065 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8066 sp->config.tx_fifo_num);
8067
8068 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8069 sp->config.rx_ring_num);
8070
8071 switch (sp->config.intr_type) {
8072 case INTA:
8073 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8074 break;
8075 case MSI_X:
8076 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8077 break;
8078 }
8079 if (sp->config.multiq) {
8080 for (i = 0; i < sp->config.tx_fifo_num; i++) {
8081 struct fifo_info *fifo = &mac_control->fifos[i];
8082
8083 fifo->multiq = config->multiq;
8084 }
8085 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8086 dev->name);
8087 } else
8088 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8089 dev->name);
8090
8091 switch (sp->config.tx_steering_type) {
8092 case NO_STEERING:
8093 DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8094 dev->name);
8095 break;
8096 case TX_PRIORITY_STEERING:
8097 DBG_PRINT(ERR_DBG,
8098 "%s: Priority steering enabled for transmit\n",
8099 dev->name);
8100 break;
8101 case TX_DEFAULT_STEERING:
8102 DBG_PRINT(ERR_DBG,
8103 "%s: Default steering enabled for transmit\n",
8104 dev->name);
8105 }
8106
8107 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8108 dev->name);
8109
8110 snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8111 sp->product_name);
8112
8113 if (vlan_tag_strip)
8114 sp->vlan_strip_flag = 1;
8115 else
8116 sp->vlan_strip_flag = 0;
8117
8118
8119
8120
8121
8122
8123 netif_carrier_off(dev);
8124
8125 return 0;
8126
8127register_failed:
8128set_swap_failed:
8129 iounmap(sp->bar1);
8130bar1_remap_failed:
8131 iounmap(sp->bar0);
8132bar0_remap_failed:
8133mem_alloc_failed:
8134 free_shared_mem(sp);
8135 pci_disable_device(pdev);
8136 pci_release_regions(pdev);
8137 free_netdev(dev);
8138
8139 return ret;
8140}
8141
8142
8143
8144
8145
8146
8147
8148
8149
8150
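/**
 *  s2io_rem_nic - Free the PCI device
 *  @pdev : structure containing the PCI related information of the device.
 *  Description:
 *  This function is called by the PCI subsystem to release a PCI device
 *  and free up all resources held by the device. This could be in response
 *  to a hot plug event or when the driver is to be removed from memory.
 */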
8151static void s2io_rem_nic(struct pci_dev *pdev)
8152{
8153 struct net_device *dev = pci_get_drvdata(pdev);
8154 struct s2io_nic *sp;
8155
8156 if (dev == NULL) {
8157 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8158 return;
8159 }
8160
8161 sp = netdev_priv(dev);
8162
8163 cancel_work_sync(&sp->rst_timer_task);
8164 cancel_work_sync(&sp->set_link_task);
8165
8166 unregister_netdev(dev);
8167
8168 free_shared_mem(sp);
8169 iounmap(sp->bar0);
8170 iounmap(sp->bar1);
8171 pci_release_regions(pdev);
8172 free_netdev(dev);
8173 pci_disable_device(pdev);
8174}
8175
8176module_pci_driver(s2io_driver);
8177
8178static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8179 struct tcphdr **tcp, struct RxD_t *rxdp,
8180 struct s2io_nic *sp)
8181{
8182 int ip_off;
8183 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8184
8185 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8186 DBG_PRINT(INIT_DBG,
8187 "%s: Non-TCP frames not supported for LRO\n",
8188 __func__);
8189 return -1;
8190 }
8191
8192
8193 if ((l2_type == 0) || (l2_type == 4)) {
8194 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8195
8196
8197
8198
8199 if ((!sp->vlan_strip_flag) &&
8200 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8201 ip_off += HEADER_VLAN_SIZE;
8202 } else {
8203
8204 return -1;
8205 }
8206
8207 *ip = (struct iphdr *)(buffer + ip_off);
8208 ip_len = (u8)((*ip)->ihl);
8209 ip_len <<= 2;
8210 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8211
8212 return 0;
8213}
8214
8215static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8216 struct tcphdr *tcp)
8217{
8218 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8219 if ((lro->iph->saddr != ip->saddr) ||
8220 (lro->iph->daddr != ip->daddr) ||
8221 (lro->tcph->source != tcp->source) ||
8222 (lro->tcph->dest != tcp->dest))
8223 return -1;
8224 return 0;
8225}
8226
8227static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8228{
8229 return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8230}
8231
8232static void initiate_new_session(struct lro *lro, u8 *l2h,
8233 struct iphdr *ip, struct tcphdr *tcp,
8234 u32 tcp_pyld_len, u16 vlan_tag)
8235{
8236 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8237 lro->l2h = l2h;
8238 lro->iph = ip;
8239 lro->tcph = tcp;
8240 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8241 lro->tcp_ack = tcp->ack_seq;
8242 lro->sg_num = 1;
8243 lro->total_len = ntohs(ip->tot_len);
8244 lro->frags_len = 0;
8245 lro->vlan_tag = vlan_tag;
8246
8247
8248
8249
8250 if (tcp->doff == 8) {
8251 __be32 *ptr;
8252 ptr = (__be32 *)(tcp+1);
8253 lro->saw_ts = 1;
8254 lro->cur_tsval = ntohl(*(ptr+1));
8255 lro->cur_tsecr = *(ptr+2);
8256 }
8257 lro->in_use = 1;
8258}
8259
8260static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8261{
8262 struct iphdr *ip = lro->iph;
8263 struct tcphdr *tcp = lro->tcph;
8264 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8265
8266 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8267
8268
8269 csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8270 ip->tot_len = htons(lro->total_len);
8271
8272
8273 tcp->ack_seq = lro->tcp_ack;
8274 tcp->window = lro->window;
8275
8276
8277 if (lro->saw_ts) {
8278 __be32 *ptr = (__be32 *)(tcp + 1);
8279 *(ptr+2) = lro->cur_tsecr;
8280 }
8281
8282
8283
8284
8285 swstats->sum_avg_pkts_aggregated += lro->sg_num;
8286 swstats->num_aggregations++;
8287}
8288
8289static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8290 struct tcphdr *tcp, u32 l4_pyld)
8291{
8292 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8293 lro->total_len += l4_pyld;
8294 lro->frags_len += l4_pyld;
8295 lro->tcp_next_seq += l4_pyld;
8296 lro->sg_num++;
8297
8298
8299 lro->tcp_ack = tcp->ack_seq;
8300 lro->window = tcp->window;
8301
8302 if (lro->saw_ts) {
8303 __be32 *ptr;
8304
8305 ptr = (__be32 *)(tcp+1);
8306 lro->cur_tsval = ntohl(*(ptr+1));
8307 lro->cur_tsecr = *(ptr + 2);
8308 }
8309}
8310
8311static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8312 struct tcphdr *tcp, u32 tcp_pyld_len)
8313{
8314 u8 *ptr;
8315
8316 DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8317
8318 if (!tcp_pyld_len) {
8319
8320 return -1;
8321 }
8322
8323 if (ip->ihl != 5)
8324 return -1;
8325
8326
8327 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8328 return -1;
8329
8330
8331 if (tcp->urg || tcp->psh || tcp->rst ||
8332 tcp->syn || tcp->fin ||
8333 tcp->ece || tcp->cwr || !tcp->ack) {
8334
8335
8336
8337
8338
8339 return -1;
8340 }
8341
8342
8343
8344
8345
8346 if (tcp->doff != 5 && tcp->doff != 8)
8347 return -1;
8348
8349 if (tcp->doff == 8) {
8350 ptr = (u8 *)(tcp + 1);
8351 while (*ptr == TCPOPT_NOP)
8352 ptr++;
8353 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8354 return -1;
8355
8356
8357 if (l_lro)
8358 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8359 return -1;
8360
8361
8362 if (*((__be32 *)(ptr+6)) == 0)
8363 return -1;
8364 }
8365
8366 return 0;
8367}
8368
8369static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
8370 u8 **tcp, u32 *tcp_len, struct lro **lro,
8371 struct RxD_t *rxdp, struct s2io_nic *sp)
8372{
8373 struct iphdr *ip;
8374 struct tcphdr *tcph;
8375 int ret = 0, i;
8376 u16 vlan_tag = 0;
8377 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8378
8379 ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8380 rxdp, sp);
8381 if (ret)
8382 return ret;
8383
8384 DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);
8385
8386 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8387 tcph = (struct tcphdr *)*tcp;
8388 *tcp_len = get_l4_pyld_length(ip, tcph);
8389 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8390 struct lro *l_lro = &ring_data->lro0_n[i];
8391 if (l_lro->in_use) {
8392 if (check_for_socket_match(l_lro, ip, tcph))
8393 continue;
8394
8395 *lro = l_lro;
8396
8397 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8398				DBG_PRINT(INFO_DBG, "%s: Out of sequence: "
8399					  "expected 0x%x, got 0x%x\n",
8400					  __func__,
8401					  (*lro)->tcp_next_seq,
8402					  ntohl(tcph->seq));
8403
8404 swstats->outof_sequence_pkts++;
8405 ret = 2;
8406 break;
8407 }
8408
8409 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
8410 *tcp_len))
8411 ret = 1;
8412 else
8413 ret = 2;
8414 break;
8415 }
8416 }
8417
8418 if (ret == 0) {
8419
8420
8421
8422
8423
8424 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
8425 return 5;
8426
8427 for (i = 0; i < MAX_LRO_SESSIONS; i++) {
8428 struct lro *l_lro = &ring_data->lro0_n[i];
8429 if (!(l_lro->in_use)) {
8430 *lro = l_lro;
8431 ret = 3;
8432 break;
8433 }
8434 }
8435 }
8436
8437 if (ret == 0) {
8438 DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
8439 __func__);
8440 *lro = NULL;
8441 return ret;
8442 }
8443
8444 switch (ret) {
8445 case 3:
8446 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8447 vlan_tag);
8448 break;
8449 case 2:
8450 update_L3L4_header(sp, *lro);
8451 break;
8452 case 1:
8453 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8454 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8455 update_L3L4_header(sp, *lro);
8456 ret = 4;
8457 }
8458 break;
8459 default:
8460 DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
8461 break;
8462 }
8463
8464 return ret;
8465}
8466
8467static void clear_lro_session(struct lro *lro)
8468{
8469 static u16 lro_struct_size = sizeof(struct lro);
8470
8471 memset(lro, 0, lro_struct_size);
8472}
8473
8474static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8475{
8476 struct net_device *dev = skb->dev;
8477 struct s2io_nic *sp = netdev_priv(dev);
8478
8479 skb->protocol = eth_type_trans(skb, dev);
8480 if (vlan_tag && sp->vlan_strip_flag)
8481 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8482 if (sp->config.napi)
8483 netif_receive_skb(skb);
8484 else
8485 netif_rx(skb);
8486}
8487
8488static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8489 struct sk_buff *skb, u32 tcp_len)
8490{
8491 struct sk_buff *first = lro->parent;
8492 struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8493
8494 first->len += tcp_len;
8495 first->data_len = lro->frags_len;
8496 skb_pull(skb, (skb->len - tcp_len));
8497 if (skb_shinfo(first)->frag_list)
8498 lro->last_frag->next = skb;
8499 else
8500 skb_shinfo(first)->frag_list = skb;
8501 first->truesize += skb->truesize;
8502 lro->last_frag = skb;
8503 swstats->clubbed_frms_cnt++;
8504}
8505
8506
8507
8508
8509
8510
8511
8512
8513
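/**
 * s2io_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */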
8514static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8515 pci_channel_state_t state)
8516{
8517 struct net_device *netdev = pci_get_drvdata(pdev);
8518 struct s2io_nic *sp = netdev_priv(netdev);
8519
8520 netif_device_detach(netdev);
8521
8522 if (state == pci_channel_io_perm_failure)
8523 return PCI_ERS_RESULT_DISCONNECT;
8524
8525 if (netif_running(netdev)) {
8526
8527 do_s2io_card_down(sp, 0);
8528 }
8529 pci_disable_device(pdev);
8530
8531 return PCI_ERS_RESULT_NEED_RESET;
8532}
8533
8534
8535
8536
8537
8538
8539
8540
8541
8542
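/**
 * s2io_io_slot_reset - called after the pci bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */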
8543static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8544{
8545 struct net_device *netdev = pci_get_drvdata(pdev);
8546 struct s2io_nic *sp = netdev_priv(netdev);
8547
8548 if (pci_enable_device(pdev)) {
8549 pr_err("Cannot re-enable PCI device after reset.\n");
8550 return PCI_ERS_RESULT_DISCONNECT;
8551 }
8552
8553 pci_set_master(pdev);
8554 s2io_reset(sp);
8555
8556 return PCI_ERS_RESULT_RECOVERED;
8557}
8558
8559
8560
8561
8562
8563
8564
8565
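/**
 * s2io_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it is OK to resume normal operation.
 */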
8566static void s2io_io_resume(struct pci_dev *pdev)
8567{
8568 struct net_device *netdev = pci_get_drvdata(pdev);
8569 struct s2io_nic *sp = netdev_priv(netdev);
8570
8571 if (netif_running(netdev)) {
8572 if (s2io_card_up(sp)) {
8573 pr_err("Can't bring device back up after reset.\n");
8574 return;
8575 }
8576
8577 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8578 s2io_card_down(sp);
8579 pr_err("Can't restore mac addr after reset.\n");
8580 return;
8581 }
8582 }
8583
8584 netif_device_attach(netdev);
8585 netif_tx_wake_all_queues(netdev);
8586}
8587