/* linux/drivers/net/ethernet/neterion/s2io.c */
   1/************************************************************************
   2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
   3 * Copyright(c) 2002-2010 Exar Corp.
   4 *
   5 * This software may be used and distributed according to the terms of
   6 * the GNU General Public License (GPL), incorporated herein by reference.
   7 * Drivers based on or derived from this code fall under the GPL and must
   8 * retain the authorship, copyright and license notice.  This file is not
   9 * a complete program and may only be used when the entire operating
  10 * system is licensed under the GPL.
  11 * See the file COPYING in this distribution for more information.
  12 *
  13 * Credits:
  14 * Jeff Garzik          : For pointing out the improper error condition
  15 *                        check in the s2io_xmit routine and also some
  16 *                        issues in the Tx watch dog function. Also for
  17 *                        patiently answering all those innumerable
 *                        questions regarding the 2.6 porting issues.
  19 * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
  20 *                        macros available only in 2.6 Kernel.
  21 * Francois Romieu      : For pointing out all code part that were
  22 *                        deprecated and also styling related comments.
  23 * Grant Grundler       : For helping me get rid of some Architecture
  24 *                        dependent code.
  25 * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
  26 *
  27 * The module loadable parameters that are supported by the driver and a brief
  28 * explanation of all the variables.
  29 *
  30 * rx_ring_num : This can be used to program the number of receive rings used
  31 * in the driver.
  32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
  33 *     This is also an array of size 8.
  34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
  35 *              values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
  37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
  38 * Tx descriptors that can be associated with each corresponding FIFO.
  39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
  40 *     2(MSI_X). Default value is '2(MSI_X)'
  41 * lro_max_pkts: This parameter defines maximum number of packets can be
  42 *     aggregated as a single large packet
  43 * napi: This parameter used to enable/disable NAPI (polling Rx)
  44 *     Possible values '1' for enable and '0' for disable. Default is '1'
  45 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
  46 *                 Possible values '1' for enable , '0' for disable.
  47 *                 Default is '2' - which means disable in promisc mode
  48 *                 and enable in non-promiscuous mode.
  49 * multiq: This parameter used to enable/disable MULTIQUEUE support.
  50 *      Possible values '1' for enable and '0' for disable. Default is '0'
  51 ************************************************************************/
  52
  53#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  54
  55#include <linux/module.h>
  56#include <linux/types.h>
  57#include <linux/errno.h>
  58#include <linux/ioport.h>
  59#include <linux/pci.h>
  60#include <linux/dma-mapping.h>
  61#include <linux/kernel.h>
  62#include <linux/netdevice.h>
  63#include <linux/etherdevice.h>
  64#include <linux/mdio.h>
  65#include <linux/skbuff.h>
  66#include <linux/init.h>
  67#include <linux/delay.h>
  68#include <linux/stddef.h>
  69#include <linux/ioctl.h>
  70#include <linux/timex.h>
  71#include <linux/ethtool.h>
  72#include <linux/workqueue.h>
  73#include <linux/if_vlan.h>
  74#include <linux/ip.h>
  75#include <linux/tcp.h>
  76#include <linux/uaccess.h>
  77#include <linux/io.h>
  78#include <linux/slab.h>
  79#include <linux/prefetch.h>
  80#include <net/tcp.h>
  81#include <net/checksum.h>
  82
  83#include <asm/div64.h>
  84#include <asm/irq.h>
  85
  86/* local include */
  87#include "s2io.h"
  88#include "s2io-regs.h"
  89
#define DRV_VERSION "2.0.26.28"

/* S2io Driver name & version. */
static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;

/* Per-rxd_mode tables (index 0 = 1-buffer mode, index 1 = 3-buffer mode):
 * rxd_size is the descriptor size in bytes, rxd_count is the number of
 * RxDs that fit in one Rx block.
 */
static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};
  98
  99static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
 100{
 101        int ret;
 102
 103        ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
 104               (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
 105
 106        return ret;
 107}
 108
 109/*
 110 * Cards with following subsystem_id have a link state indication
 111 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 112 * macro below identifies these cards given the subsystem_id.
 113 */
/* Evaluates to 1 when an Xframe-I card with the given subsystem id is one
 * of the parts listed above with a broken link-state indicator, else 0.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)              \
        (dev_type == XFRAME_I_DEVICE) ?                                 \
        ((((subid >= 0x600B) && (subid <= 0x600D)) ||                   \
          ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* Link is up when neither a local nor a remote MAC fault is flagged in
 * the adapter-status register value.
 */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
 121
 122static inline int is_s2io_card_up(const struct s2io_nic *sp)
 123{
 124        return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
 125}
 126
 127/* Ethtool related variables and Macros. */
/* Names of the ethtool self-tests, reported via ETH_SS_TEST. */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
        "Register test\t(offline)",
        "Eeprom test\t(offline)",
        "Link test\t(online)",
        "RLDRAM test\t(offline)",
        "BIST Test\t(offline)"
};
 135
/* Hardware statistics key names common to Xframe I and Xframe II,
 * reported via ETH_SS_STATS in this exact order (must match the
 * layout of the values copied out in get_ethtool_stats).
 */
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
        {"tmac_frms"},
        {"tmac_data_octets"},
        {"tmac_drop_frms"},
        {"tmac_mcst_frms"},
        {"tmac_bcst_frms"},
        {"tmac_pause_ctrl_frms"},
        {"tmac_ttl_octets"},
        {"tmac_ucst_frms"},
        {"tmac_nucst_frms"},
        {"tmac_any_err_frms"},
        {"tmac_ttl_less_fb_octets"},
        {"tmac_vld_ip_octets"},
        {"tmac_vld_ip"},
        {"tmac_drop_ip"},
        {"tmac_icmp"},
        {"tmac_rst_tcp"},
        {"tmac_tcp"},
        {"tmac_udp"},
        {"rmac_vld_frms"},
        {"rmac_data_octets"},
        {"rmac_fcs_err_frms"},
        {"rmac_drop_frms"},
        {"rmac_vld_mcst_frms"},
        {"rmac_vld_bcst_frms"},
        {"rmac_in_rng_len_err_frms"},
        {"rmac_out_rng_len_err_frms"},
        {"rmac_long_frms"},
        {"rmac_pause_ctrl_frms"},
        {"rmac_unsup_ctrl_frms"},
        {"rmac_ttl_octets"},
        {"rmac_accepted_ucst_frms"},
        {"rmac_accepted_nucst_frms"},
        {"rmac_discarded_frms"},
        {"rmac_drop_events"},
        {"rmac_ttl_less_fb_octets"},
        {"rmac_ttl_frms"},
        {"rmac_usized_frms"},
        {"rmac_osized_frms"},
        {"rmac_frag_frms"},
        {"rmac_jabber_frms"},
        {"rmac_ttl_64_frms"},
        {"rmac_ttl_65_127_frms"},
        {"rmac_ttl_128_255_frms"},
        {"rmac_ttl_256_511_frms"},
        {"rmac_ttl_512_1023_frms"},
        {"rmac_ttl_1024_1518_frms"},
        {"rmac_ip"},
        {"rmac_ip_octets"},
        {"rmac_hdr_err_ip"},
        {"rmac_drop_ip"},
        {"rmac_icmp"},
        {"rmac_tcp"},
        {"rmac_udp"},
        {"rmac_err_drp_udp"},
        {"rmac_xgmii_err_sym"},
        {"rmac_frms_q0"},
        {"rmac_frms_q1"},
        {"rmac_frms_q2"},
        {"rmac_frms_q3"},
        {"rmac_frms_q4"},
        {"rmac_frms_q5"},
        {"rmac_frms_q6"},
        {"rmac_frms_q7"},
        {"rmac_full_q0"},
        {"rmac_full_q1"},
        {"rmac_full_q2"},
        {"rmac_full_q3"},
        {"rmac_full_q4"},
        {"rmac_full_q5"},
        {"rmac_full_q6"},
        {"rmac_full_q7"},
        {"rmac_pause_cnt"},
        {"rmac_xgmii_data_err_cnt"},
        {"rmac_xgmii_ctrl_err_cnt"},
        {"rmac_accepted_ip"},
        {"rmac_err_tcp"},
        {"rd_req_cnt"},
        {"new_rd_req_cnt"},
        {"new_rd_req_rtry_cnt"},
        {"rd_rtry_cnt"},
        {"wr_rtry_rd_ack_cnt"},
        {"wr_req_cnt"},
        {"new_wr_req_cnt"},
        {"new_wr_req_rtry_cnt"},
        {"wr_rtry_cnt"},
        {"wr_disc_cnt"},
        {"rd_rtry_wr_ack_cnt"},
        {"txp_wr_cnt"},
        {"txd_rd_cnt"},
        {"txd_wr_cnt"},
        {"rxd_rd_cnt"},
        {"rxd_wr_cnt"},
        {"txf_rd_cnt"},
        {"rxf_wr_cnt"}
};
 232
/* Additional hardware statistics keys available only on Xframe II;
 * appended after ethtool_xena_stats_keys for that device type.
 */
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
        {"rmac_ttl_1519_4095_frms"},
        {"rmac_ttl_4096_8191_frms"},
        {"rmac_ttl_8192_max_frms"},
        {"rmac_ttl_gt_max_frms"},
        {"rmac_osized_alt_frms"},
        {"rmac_jabber_alt_frms"},
        {"rmac_gt_max_alt_frms"},
        {"rmac_vlan_frms"},
        {"rmac_len_discard"},
        {"rmac_fcs_discard"},
        {"rmac_pf_discard"},
        {"rmac_da_discard"},
        {"rmac_red_discard"},
        {"rmac_rts_discard"},
        {"rmac_ingm_full_discard"},
        {"link_fault_cnt"}
};
 251
/* Software (driver-maintained) statistics keys; the first entry is a
 * section header string shown in the ethtool -S listing.
 */
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
        {"\n DRIVER STATISTICS"},
        {"single_bit_ecc_errs"},
        {"double_bit_ecc_errs"},
        {"parity_err_cnt"},
        {"serious_err_cnt"},
        {"soft_reset_cnt"},
        {"fifo_full_cnt"},
        {"ring_0_full_cnt"},
        {"ring_1_full_cnt"},
        {"ring_2_full_cnt"},
        {"ring_3_full_cnt"},
        {"ring_4_full_cnt"},
        {"ring_5_full_cnt"},
        {"ring_6_full_cnt"},
        {"ring_7_full_cnt"},
        {"alarm_transceiver_temp_high"},
        {"alarm_transceiver_temp_low"},
        {"alarm_laser_bias_current_high"},
        {"alarm_laser_bias_current_low"},
        {"alarm_laser_output_power_high"},
        {"alarm_laser_output_power_low"},
        {"warn_transceiver_temp_high"},
        {"warn_transceiver_temp_low"},
        {"warn_laser_bias_current_high"},
        {"warn_laser_bias_current_low"},
        {"warn_laser_output_power_high"},
        {"warn_laser_output_power_low"},
        {"lro_aggregated_pkts"},
        {"lro_flush_both_count"},
        {"lro_out_of_sequence_pkts"},
        {"lro_flush_due_to_max_pkts"},
        {"lro_avg_aggr_pkts"},
        {"mem_alloc_fail_cnt"},
        {"pci_map_fail_cnt"},
        {"watchdog_timer_cnt"},
        {"mem_allocated"},
        {"mem_freed"},
        {"link_up_cnt"},
        {"link_down_cnt"},
        {"link_up_time"},
        {"link_down_time"},
        {"tx_tcode_buf_abort_cnt"},
        {"tx_tcode_desc_abort_cnt"},
        {"tx_tcode_parity_err_cnt"},
        {"tx_tcode_link_loss_cnt"},
        {"tx_tcode_list_proc_err_cnt"},
        {"rx_tcode_parity_err_cnt"},
        {"rx_tcode_abort_cnt"},
        {"rx_tcode_parity_abort_cnt"},
        {"rx_tcode_rda_fail_cnt"},
        {"rx_tcode_unkn_prot_cnt"},
        {"rx_tcode_fcs_err_cnt"},
        {"rx_tcode_buf_size_err_cnt"},
        {"rx_tcode_rxd_corrupt_cnt"},
        {"rx_tcode_unkn_err_cnt"},
        {"tda_err_cnt"},
        {"pfc_err_cnt"},
        {"pcc_err_cnt"},
        {"tti_err_cnt"},
        {"tpa_err_cnt"},
        {"sm_err_cnt"},
        {"lso_err_cnt"},
        {"mac_tmac_err_cnt"},
        {"mac_rmac_err_cnt"},
        {"xgxs_txgxs_err_cnt"},
        {"xgxs_rxgxs_err_cnt"},
        {"rc_err_cnt"},
        {"prc_pcix_err_cnt"},
        {"rpa_err_cnt"},
        {"rda_err_cnt"},
        {"rti_err_cnt"},
        {"mc_err_cnt"}
};
 326
/* Entry counts of the individual key tables above. */
#define S2IO_XENA_STAT_LEN      ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)

/* Total number of stats exposed per device type: Xframe II additionally
 * reports the "enhanced" hardware counters.
 */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

/* Byte sizes of the corresponding ethtool string blocks. */
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

/* Self-test count and the byte size of its string block. */
#define S2IO_TEST_LEN   ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)
 339
 340/* copy mac addr to def_mac_addr array */
 341static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
 342{
 343        sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
 344        sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
 345        sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
 346        sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
 347        sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
 348        sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
 349}
 350
 351/*
 352 * Constants to be programmed into the Xena's registers, to configure
 353 * the XAUI.
 354 */
 355
/* Sentinel terminating the DTX configuration sequences below. */
#define END_SIGN        0x0
/* XAUI DTX configuration sequence for Xframe II (Herc): pairs of
 * set-address / write-data commands, terminated by END_SIGN.
 */
static const u64 herc_act_dtx_cfg[] = {
        /* Set address */
        0x8000051536750000ULL, 0x80000515367500E0ULL,
        /* Write data */
        0x8000051536750004ULL, 0x80000515367500E4ULL,
        /* Set address */
        0x80010515003F0000ULL, 0x80010515003F00E0ULL,
        /* Write data */
        0x80010515003F0004ULL, 0x80010515003F00E4ULL,
        /* Set address */
        0x801205150D440000ULL, 0x801205150D4400E0ULL,
        /* Write data */
        0x801205150D440004ULL, 0x801205150D4400E4ULL,
        /* Set address */
        0x80020515F2100000ULL, 0x80020515F21000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        /* Done */
        END_SIGN
};
 377
/* XAUI DTX configuration sequence for Xframe I (Xena); same
 * set-address / write-data command format as herc_act_dtx_cfg.
 */
static const u64 xena_dtx_cfg[] = {
        /* Set address */
        0x8000051500000000ULL, 0x80000515000000E0ULL,
        /* Write data */
        0x80000515D9350004ULL, 0x80000515D93500E4ULL,
        /* Set address */
        0x8001051500000000ULL, 0x80010515000000E0ULL,
        /* Write data */
        0x80010515001E0004ULL, 0x80010515001E00E4ULL,
        /* Set address */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        END_SIGN
};
 393
 394/*
 395 * Constants for Fixing the MacAddress problem seen mostly on
 396 * Alpha machines.
 397 */
/* Register write sequence applied to work around the MAC-address read
 * problem described above; terminated by END_SIGN.
 */
static const u64 fix_mac[] = {
        0x0060000000000000ULL, 0x0060600000000000ULL,
        0x0040600000000000ULL, 0x0000600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0000600000000000ULL,
        0x0040600000000000ULL, 0x0060600000000000ULL,
        END_SIGN
};
 415
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts; FIFO 0 gets the larger default, the
 * rest the smaller one.  Overridable via the tx_fifo_len= option.
 */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Rx block count per ring (rx_ring_sz= option). */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring receive frame length steering; 0 selects the default. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
 460
 461/*
 462 * S2IO device table.
 463 * This table lists all the devices that this driver supports.
 464 */
static const struct pci_device_id s2io_tbl[] = {
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        /* terminating entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
 478
/* PCI error-recovery callbacks (AER): detection, slot reset, resume. */
static const struct pci_error_handlers s2io_err_handler = {
        .error_detected = s2io_io_error_detected,
        .slot_reset = s2io_io_slot_reset,
        .resume = s2io_io_resume,
};

/* PCI driver registration block; probe/remove are defined later in
 * this file.
 */
static struct pci_driver s2io_driver = {
        .name = "S2IO",
        .id_table = s2io_tbl,
        .probe = s2io_init_nic,
        .remove = s2io_rem_nic,
        .err_handler = &s2io_err_handler,
};
 492
 493/* A simplifier macro used both by init and free shared_mem Fns(). */
 494#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
 495
 496/* netqueue manipulation helper functions */
 497static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
 498{
 499        if (!sp->config.multiq) {
 500                int i;
 501
 502                for (i = 0; i < sp->config.tx_fifo_num; i++)
 503                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
 504        }
 505        netif_tx_stop_all_queues(sp->dev);
 506}
 507
 508static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
 509{
 510        if (!sp->config.multiq)
 511                sp->mac_control.fifos[fifo_no].queue_state =
 512                        FIFO_QUEUE_STOP;
 513
 514        netif_tx_stop_all_queues(sp->dev);
 515}
 516
 517static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
 518{
 519        if (!sp->config.multiq) {
 520                int i;
 521
 522                for (i = 0; i < sp->config.tx_fifo_num; i++)
 523                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 524        }
 525        netif_tx_start_all_queues(sp->dev);
 526}
 527
 528static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
 529{
 530        if (!sp->config.multiq) {
 531                int i;
 532
 533                for (i = 0; i < sp->config.tx_fifo_num; i++)
 534                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 535        }
 536        netif_tx_wake_all_queues(sp->dev);
 537}
 538
 539static inline void s2io_wake_tx_queue(
 540        struct fifo_info *fifo, int cnt, u8 multiq)
 541{
 542
 543        if (multiq) {
 544                if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
 545                        netif_wake_subqueue(fifo->dev, fifo->fifo_no);
 546        } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
 547                if (netif_queue_stopped(fifo->dev)) {
 548                        fifo->queue_state = FIFO_QUEUE_START;
 549                        netif_wake_queue(fifo->dev);
 550                }
 551        }
 552}
 553
 554/**
 555 * init_shared_mem - Allocation and Initialization of Memory
 556 * @nic: Device private variable.
 557 * Description: The function allocates all the memory areas shared
 558 * between the NIC and the driver. This includes Tx descriptors,
 559 * Rx descriptors and the statistics block.
 560 */
 561
 562static int init_shared_mem(struct s2io_nic *nic)
 563{
 564        u32 size;
 565        void *tmp_v_addr, *tmp_v_addr_next;
 566        dma_addr_t tmp_p_addr, tmp_p_addr_next;
 567        struct RxD_block *pre_rxd_blk = NULL;
 568        int i, j, blk_cnt;
 569        int lst_size, lst_per_page;
 570        struct net_device *dev = nic->dev;
 571        unsigned long tmp;
 572        struct buffAdd *ba;
 573        struct config_param *config = &nic->config;
 574        struct mac_info *mac_control = &nic->mac_control;
 575        unsigned long long mem_allocated = 0;
 576
 577        /* Allocation and initialization of TXDLs in FIFOs */
 578        size = 0;
 579        for (i = 0; i < config->tx_fifo_num; i++) {
 580                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 581
 582                size += tx_cfg->fifo_len;
 583        }
 584        if (size > MAX_AVAILABLE_TXDS) {
 585                DBG_PRINT(ERR_DBG,
 586                          "Too many TxDs requested: %d, max supported: %d\n",
 587                          size, MAX_AVAILABLE_TXDS);
 588                return -EINVAL;
 589        }
 590
 591        size = 0;
 592        for (i = 0; i < config->tx_fifo_num; i++) {
 593                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 594
 595                size = tx_cfg->fifo_len;
 596                /*
 597                 * Legal values are from 2 to 8192
 598                 */
 599                if (size < 2) {
 600                        DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
 601                                  "Valid lengths are 2 through 8192\n",
 602                                  i, size);
 603                        return -EINVAL;
 604                }
 605        }
 606
 607        lst_size = (sizeof(struct TxD) * config->max_txds);
 608        lst_per_page = PAGE_SIZE / lst_size;
 609
 610        for (i = 0; i < config->tx_fifo_num; i++) {
 611                struct fifo_info *fifo = &mac_control->fifos[i];
 612                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 613                int fifo_len = tx_cfg->fifo_len;
 614                int list_holder_size = fifo_len * sizeof(struct list_info_hold);
 615
 616                fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
 617                if (!fifo->list_info) {
 618                        DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
 619                        return -ENOMEM;
 620                }
 621                mem_allocated += list_holder_size;
 622        }
 623        for (i = 0; i < config->tx_fifo_num; i++) {
 624                int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
 625                                                lst_per_page);
 626                struct fifo_info *fifo = &mac_control->fifos[i];
 627                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 628
 629                fifo->tx_curr_put_info.offset = 0;
 630                fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
 631                fifo->tx_curr_get_info.offset = 0;
 632                fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
 633                fifo->fifo_no = i;
 634                fifo->nic = nic;
 635                fifo->max_txds = MAX_SKB_FRAGS + 2;
 636                fifo->dev = dev;
 637
 638                for (j = 0; j < page_num; j++) {
 639                        int k = 0;
 640                        dma_addr_t tmp_p;
 641                        void *tmp_v;
 642                        tmp_v = pci_alloc_consistent(nic->pdev,
 643                                                     PAGE_SIZE, &tmp_p);
 644                        if (!tmp_v) {
 645                                DBG_PRINT(INFO_DBG,
 646                                          "pci_alloc_consistent failed for TxDL\n");
 647                                return -ENOMEM;
 648                        }
 649                        /* If we got a zero DMA address(can happen on
 650                         * certain platforms like PPC), reallocate.
 651                         * Store virtual address of page we don't want,
 652                         * to be freed later.
 653                         */
 654                        if (!tmp_p) {
 655                                mac_control->zerodma_virt_addr = tmp_v;
 656                                DBG_PRINT(INIT_DBG,
 657                                          "%s: Zero DMA address for TxDL. "
 658                                          "Virtual address %p\n",
 659                                          dev->name, tmp_v);
 660                                tmp_v = pci_alloc_consistent(nic->pdev,
 661                                                             PAGE_SIZE, &tmp_p);
 662                                if (!tmp_v) {
 663                                        DBG_PRINT(INFO_DBG,
 664                                                  "pci_alloc_consistent failed for TxDL\n");
 665                                        return -ENOMEM;
 666                                }
 667                                mem_allocated += PAGE_SIZE;
 668                        }
 669                        while (k < lst_per_page) {
 670                                int l = (j * lst_per_page) + k;
 671                                if (l == tx_cfg->fifo_len)
 672                                        break;
 673                                fifo->list_info[l].list_virt_addr =
 674                                        tmp_v + (k * lst_size);
 675                                fifo->list_info[l].list_phy_addr =
 676                                        tmp_p + (k * lst_size);
 677                                k++;
 678                        }
 679                }
 680        }
 681
 682        for (i = 0; i < config->tx_fifo_num; i++) {
 683                struct fifo_info *fifo = &mac_control->fifos[i];
 684                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 685
 686                size = tx_cfg->fifo_len;
 687                fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
 688                if (!fifo->ufo_in_band_v)
 689                        return -ENOMEM;
 690                mem_allocated += (size * sizeof(u64));
 691        }
 692
 693        /* Allocation and initialization of RXDs in Rings */
 694        size = 0;
 695        for (i = 0; i < config->rx_ring_num; i++) {
 696                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 697                struct ring_info *ring = &mac_control->rings[i];
 698
 699                if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
 700                        DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
 701                                  "multiple of RxDs per Block\n",
 702                                  dev->name, i);
 703                        return FAILURE;
 704                }
 705                size += rx_cfg->num_rxd;
 706                ring->block_count = rx_cfg->num_rxd /
 707                        (rxd_count[nic->rxd_mode] + 1);
 708                ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
 709        }
 710        if (nic->rxd_mode == RXD_MODE_1)
 711                size = (size * (sizeof(struct RxD1)));
 712        else
 713                size = (size * (sizeof(struct RxD3)));
 714
 715        for (i = 0; i < config->rx_ring_num; i++) {
 716                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 717                struct ring_info *ring = &mac_control->rings[i];
 718
 719                ring->rx_curr_get_info.block_index = 0;
 720                ring->rx_curr_get_info.offset = 0;
 721                ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
 722                ring->rx_curr_put_info.block_index = 0;
 723                ring->rx_curr_put_info.offset = 0;
 724                ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
 725                ring->nic = nic;
 726                ring->ring_no = i;
 727
 728                blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
 729                /*  Allocating all the Rx blocks */
 730                for (j = 0; j < blk_cnt; j++) {
 731                        struct rx_block_info *rx_blocks;
 732                        int l;
 733
 734                        rx_blocks = &ring->rx_blocks[j];
 735                        size = SIZE_OF_BLOCK;   /* size is always page size */
 736                        tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
 737                                                          &tmp_p_addr);
 738                        if (tmp_v_addr == NULL) {
 739                                /*
 740                                 * In case of failure, free_shared_mem()
 741                                 * is called, which should free any
 742                                 * memory that was alloced till the
 743                                 * failure happened.
 744                                 */
 745                                rx_blocks->block_virt_addr = tmp_v_addr;
 746                                return -ENOMEM;
 747                        }
 748                        mem_allocated += size;
 749                        memset(tmp_v_addr, 0, size);
 750
 751                        size = sizeof(struct rxd_info) *
 752                                rxd_count[nic->rxd_mode];
 753                        rx_blocks->block_virt_addr = tmp_v_addr;
 754                        rx_blocks->block_dma_addr = tmp_p_addr;
 755                        rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
 756                        if (!rx_blocks->rxds)
 757                                return -ENOMEM;
 758                        mem_allocated += size;
 759                        for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
 760                                rx_blocks->rxds[l].virt_addr =
 761                                        rx_blocks->block_virt_addr +
 762                                        (rxd_size[nic->rxd_mode] * l);
 763                                rx_blocks->rxds[l].dma_addr =
 764                                        rx_blocks->block_dma_addr +
 765                                        (rxd_size[nic->rxd_mode] * l);
 766                        }
 767                }
 768                /* Interlinking all Rx Blocks */
 769                for (j = 0; j < blk_cnt; j++) {
 770                        int next = (j + 1) % blk_cnt;
 771                        tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
 772                        tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
 773                        tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
 774                        tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
 775
 776                        pre_rxd_blk = tmp_v_addr;
 777                        pre_rxd_blk->reserved_2_pNext_RxD_block =
 778                                (unsigned long)tmp_v_addr_next;
 779                        pre_rxd_blk->pNext_RxD_Blk_physical =
 780                                (u64)tmp_p_addr_next;
 781                }
 782        }
 783        if (nic->rxd_mode == RXD_MODE_3B) {
 784                /*
 785                 * Allocation of Storages for buffer addresses in 2BUFF mode
 786                 * and the buffers as well.
 787                 */
 788                for (i = 0; i < config->rx_ring_num; i++) {
 789                        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 790                        struct ring_info *ring = &mac_control->rings[i];
 791
 792                        blk_cnt = rx_cfg->num_rxd /
 793                                (rxd_count[nic->rxd_mode] + 1);
 794                        size = sizeof(struct buffAdd *) * blk_cnt;
 795                        ring->ba = kmalloc(size, GFP_KERNEL);
 796                        if (!ring->ba)
 797                                return -ENOMEM;
 798                        mem_allocated += size;
 799                        for (j = 0; j < blk_cnt; j++) {
 800                                int k = 0;
 801
 802                                size = sizeof(struct buffAdd) *
 803                                        (rxd_count[nic->rxd_mode] + 1);
 804                                ring->ba[j] = kmalloc(size, GFP_KERNEL);
 805                                if (!ring->ba[j])
 806                                        return -ENOMEM;
 807                                mem_allocated += size;
 808                                while (k != rxd_count[nic->rxd_mode]) {
 809                                        ba = &ring->ba[j][k];
 810                                        size = BUF0_LEN + ALIGN_SIZE;
 811                                        ba->ba_0_org = kmalloc(size, GFP_KERNEL);
 812                                        if (!ba->ba_0_org)
 813                                                return -ENOMEM;
 814                                        mem_allocated += size;
 815                                        tmp = (unsigned long)ba->ba_0_org;
 816                                        tmp += ALIGN_SIZE;
 817                                        tmp &= ~((unsigned long)ALIGN_SIZE);
 818                                        ba->ba_0 = (void *)tmp;
 819
 820                                        size = BUF1_LEN + ALIGN_SIZE;
 821                                        ba->ba_1_org = kmalloc(size, GFP_KERNEL);
 822                                        if (!ba->ba_1_org)
 823                                                return -ENOMEM;
 824                                        mem_allocated += size;
 825                                        tmp = (unsigned long)ba->ba_1_org;
 826                                        tmp += ALIGN_SIZE;
 827                                        tmp &= ~((unsigned long)ALIGN_SIZE);
 828                                        ba->ba_1 = (void *)tmp;
 829                                        k++;
 830                                }
 831                        }
 832                }
 833        }
 834
 835        /* Allocation and initialization of Statistics block */
 836        size = sizeof(struct stat_block);
 837        mac_control->stats_mem =
 838                pci_alloc_consistent(nic->pdev, size,
 839                                     &mac_control->stats_mem_phy);
 840
 841        if (!mac_control->stats_mem) {
 842                /*
 843                 * In case of failure, free_shared_mem() is called, which
 844                 * should free any memory that was alloced till the
 845                 * failure happened.
 846                 */
 847                return -ENOMEM;
 848        }
 849        mem_allocated += size;
 850        mac_control->stats_mem_sz = size;
 851
 852        tmp_v_addr = mac_control->stats_mem;
 853        mac_control->stats_info = tmp_v_addr;
 854        memset(tmp_v_addr, 0, size);
 855        DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
 856                dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
 857        mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
 858        return SUCCESS;
 859}
 860
 861/**
 862 * free_shared_mem - Free the allocated Memory
 863 * @nic:  Device private variable.
 864 * Description: This function is to free all memory locations allocated by
 865 * the init_shared_mem() function and return it to the kernel.
 866 */
 867
 868static void free_shared_mem(struct s2io_nic *nic)
 869{
 870        int i, j, blk_cnt, size;
 871        void *tmp_v_addr;
 872        dma_addr_t tmp_p_addr;
 873        int lst_size, lst_per_page;
 874        struct net_device *dev;
 875        int page_num = 0;
 876        struct config_param *config;
 877        struct mac_info *mac_control;
 878        struct stat_block *stats;
 879        struct swStat *swstats;
 880
 881        if (!nic)
 882                return;
 883
 884        dev = nic->dev;
 885
 886        config = &nic->config;
 887        mac_control = &nic->mac_control;
 888        stats = mac_control->stats_info;
 889        swstats = &stats->sw_stat;
 890
 891        lst_size = sizeof(struct TxD) * config->max_txds;
 892        lst_per_page = PAGE_SIZE / lst_size;
 893
 894        for (i = 0; i < config->tx_fifo_num; i++) {
 895                struct fifo_info *fifo = &mac_control->fifos[i];
 896                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 897
 898                page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
 899                for (j = 0; j < page_num; j++) {
 900                        int mem_blks = (j * lst_per_page);
 901                        struct list_info_hold *fli;
 902
 903                        if (!fifo->list_info)
 904                                return;
 905
 906                        fli = &fifo->list_info[mem_blks];
 907                        if (!fli->list_virt_addr)
 908                                break;
 909                        pci_free_consistent(nic->pdev, PAGE_SIZE,
 910                                            fli->list_virt_addr,
 911                                            fli->list_phy_addr);
 912                        swstats->mem_freed += PAGE_SIZE;
 913                }
 914                /* If we got a zero DMA address during allocation,
 915                 * free the page now
 916                 */
 917                if (mac_control->zerodma_virt_addr) {
 918                        pci_free_consistent(nic->pdev, PAGE_SIZE,
 919                                            mac_control->zerodma_virt_addr,
 920                                            (dma_addr_t)0);
 921                        DBG_PRINT(INIT_DBG,
 922                                  "%s: Freeing TxDL with zero DMA address. "
 923                                  "Virtual address %p\n",
 924                                  dev->name, mac_control->zerodma_virt_addr);
 925                        swstats->mem_freed += PAGE_SIZE;
 926                }
 927                kfree(fifo->list_info);
 928                swstats->mem_freed += tx_cfg->fifo_len *
 929                        sizeof(struct list_info_hold);
 930        }
 931
 932        size = SIZE_OF_BLOCK;
 933        for (i = 0; i < config->rx_ring_num; i++) {
 934                struct ring_info *ring = &mac_control->rings[i];
 935
 936                blk_cnt = ring->block_count;
 937                for (j = 0; j < blk_cnt; j++) {
 938                        tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
 939                        tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
 940                        if (tmp_v_addr == NULL)
 941                                break;
 942                        pci_free_consistent(nic->pdev, size,
 943                                            tmp_v_addr, tmp_p_addr);
 944                        swstats->mem_freed += size;
 945                        kfree(ring->rx_blocks[j].rxds);
 946                        swstats->mem_freed += sizeof(struct rxd_info) *
 947                                rxd_count[nic->rxd_mode];
 948                }
 949        }
 950
 951        if (nic->rxd_mode == RXD_MODE_3B) {
 952                /* Freeing buffer storage addresses in 2BUFF mode. */
 953                for (i = 0; i < config->rx_ring_num; i++) {
 954                        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 955                        struct ring_info *ring = &mac_control->rings[i];
 956
 957                        blk_cnt = rx_cfg->num_rxd /
 958                                (rxd_count[nic->rxd_mode] + 1);
 959                        for (j = 0; j < blk_cnt; j++) {
 960                                int k = 0;
 961                                if (!ring->ba[j])
 962                                        continue;
 963                                while (k != rxd_count[nic->rxd_mode]) {
 964                                        struct buffAdd *ba = &ring->ba[j][k];
 965                                        kfree(ba->ba_0_org);
 966                                        swstats->mem_freed +=
 967                                                BUF0_LEN + ALIGN_SIZE;
 968                                        kfree(ba->ba_1_org);
 969                                        swstats->mem_freed +=
 970                                                BUF1_LEN + ALIGN_SIZE;
 971                                        k++;
 972                                }
 973                                kfree(ring->ba[j]);
 974                                swstats->mem_freed += sizeof(struct buffAdd) *
 975                                        (rxd_count[nic->rxd_mode] + 1);
 976                        }
 977                        kfree(ring->ba);
 978                        swstats->mem_freed += sizeof(struct buffAdd *) *
 979                                blk_cnt;
 980                }
 981        }
 982
 983        for (i = 0; i < nic->config.tx_fifo_num; i++) {
 984                struct fifo_info *fifo = &mac_control->fifos[i];
 985                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 986
 987                if (fifo->ufo_in_band_v) {
 988                        swstats->mem_freed += tx_cfg->fifo_len *
 989                                sizeof(u64);
 990                        kfree(fifo->ufo_in_band_v);
 991                }
 992        }
 993
 994        if (mac_control->stats_mem) {
 995                swstats->mem_freed += mac_control->stats_mem_sz;
 996                pci_free_consistent(nic->pdev,
 997                                    mac_control->stats_mem_sz,
 998                                    mac_control->stats_mem,
 999                                    mac_control->stats_mem_phy);
1000        }
1001}
1002
1003/**
1004 * s2io_verify_pci_mode -
1005 */
1006
1007static int s2io_verify_pci_mode(struct s2io_nic *nic)
1008{
1009        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1010        register u64 val64 = 0;
1011        int     mode;
1012
1013        val64 = readq(&bar0->pci_mode);
1014        mode = (u8)GET_PCI_MODE(val64);
1015
1016        if (val64 & PCI_MODE_UNKNOWN_MODE)
1017                return -1;      /* Unknown PCI mode */
1018        return mode;
1019}
1020
1021#define NEC_VENID   0x1033
1022#define NEC_DEVID   0x0125
1023static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1024{
1025        struct pci_dev *tdev = NULL;
1026        for_each_pci_dev(tdev) {
1027                if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1028                        if (tdev->bus == s2io_pdev->bus->parent) {
1029                                pci_dev_put(tdev);
1030                                return 1;
1031                        }
1032                }
1033        }
1034        return 0;
1035}
1036
1037static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1038/**
1039 * s2io_print_pci_mode -
1040 */
1041static int s2io_print_pci_mode(struct s2io_nic *nic)
1042{
1043        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1044        register u64 val64 = 0;
1045        int     mode;
1046        struct config_param *config = &nic->config;
1047        const char *pcimode;
1048
1049        val64 = readq(&bar0->pci_mode);
1050        mode = (u8)GET_PCI_MODE(val64);
1051
1052        if (val64 & PCI_MODE_UNKNOWN_MODE)
1053                return -1;      /* Unknown PCI mode */
1054
1055        config->bus_speed = bus_speed[mode];
1056
1057        if (s2io_on_nec_bridge(nic->pdev)) {
1058                DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1059                          nic->dev->name);
1060                return mode;
1061        }
1062
1063        switch (mode) {
1064        case PCI_MODE_PCI_33:
1065                pcimode = "33MHz PCI bus";
1066                break;
1067        case PCI_MODE_PCI_66:
1068                pcimode = "66MHz PCI bus";
1069                break;
1070        case PCI_MODE_PCIX_M1_66:
1071                pcimode = "66MHz PCIX(M1) bus";
1072                break;
1073        case PCI_MODE_PCIX_M1_100:
1074                pcimode = "100MHz PCIX(M1) bus";
1075                break;
1076        case PCI_MODE_PCIX_M1_133:
1077                pcimode = "133MHz PCIX(M1) bus";
1078                break;
1079        case PCI_MODE_PCIX_M2_66:
1080                pcimode = "133MHz PCIX(M2) bus";
1081                break;
1082        case PCI_MODE_PCIX_M2_100:
1083                pcimode = "200MHz PCIX(M2) bus";
1084                break;
1085        case PCI_MODE_PCIX_M2_133:
1086                pcimode = "266MHz PCIX(M2) bus";
1087                break;
1088        default:
1089                pcimode = "unsupported bus!";
1090                mode = -1;
1091        }
1092
1093        DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1094                  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1095
1096        return mode;
1097}
1098
1099/**
1100 *  init_tti - Initialization transmit traffic interrupt scheme
1101 *  @nic: device private variable
1102 *  @link: link status (UP/DOWN) used to enable/disable continuous
1103 *  transmit interrupts
1104 *  Description: The function configures transmit traffic interrupts
1105 *  Return Value:  SUCCESS on success and
1106 *  '-1' on failure
1107 */
1108
1109static int init_tti(struct s2io_nic *nic, int link)
1110{
1111        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1112        register u64 val64 = 0;
1113        int i;
1114        struct config_param *config = &nic->config;
1115
1116        for (i = 0; i < config->tx_fifo_num; i++) {
1117                /*
1118                 * TTI Initialization. Default Tx timer gets us about
1119                 * 250 interrupts per sec. Continuous interrupts are enabled
1120                 * by default.
1121                 */
1122                if (nic->device_type == XFRAME_II_DEVICE) {
1123                        int count = (nic->config.bus_speed * 125)/2;
1124                        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1125                } else
1126                        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1127
1128                val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1129                        TTI_DATA1_MEM_TX_URNG_B(0x10) |
1130                        TTI_DATA1_MEM_TX_URNG_C(0x30) |
1131                        TTI_DATA1_MEM_TX_TIMER_AC_EN;
1132                if (i == 0)
1133                        if (use_continuous_tx_intrs && (link == LINK_UP))
1134                                val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1135                writeq(val64, &bar0->tti_data1_mem);
1136
1137                if (nic->config.intr_type == MSI_X) {
1138                        val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1139                                TTI_DATA2_MEM_TX_UFC_B(0x100) |
1140                                TTI_DATA2_MEM_TX_UFC_C(0x200) |
1141                                TTI_DATA2_MEM_TX_UFC_D(0x300);
1142                } else {
1143                        if ((nic->config.tx_steering_type ==
1144                             TX_DEFAULT_STEERING) &&
1145                            (config->tx_fifo_num > 1) &&
1146                            (i >= nic->udp_fifo_idx) &&
1147                            (i < (nic->udp_fifo_idx +
1148                                  nic->total_udp_fifos)))
1149                                val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1150                                        TTI_DATA2_MEM_TX_UFC_B(0x80) |
1151                                        TTI_DATA2_MEM_TX_UFC_C(0x100) |
1152                                        TTI_DATA2_MEM_TX_UFC_D(0x120);
1153                        else
1154                                val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1155                                        TTI_DATA2_MEM_TX_UFC_B(0x20) |
1156                                        TTI_DATA2_MEM_TX_UFC_C(0x40) |
1157                                        TTI_DATA2_MEM_TX_UFC_D(0x80);
1158                }
1159
1160                writeq(val64, &bar0->tti_data2_mem);
1161
1162                val64 = TTI_CMD_MEM_WE |
1163                        TTI_CMD_MEM_STROBE_NEW_CMD |
1164                        TTI_CMD_MEM_OFFSET(i);
1165                writeq(val64, &bar0->tti_command_mem);
1166
1167                if (wait_for_cmd_complete(&bar0->tti_command_mem,
1168                                          TTI_CMD_MEM_STROBE_NEW_CMD,
1169                                          S2IO_BIT_RESET) != SUCCESS)
1170                        return FAILURE;
1171        }
1172
1173        return SUCCESS;
1174}
1175
1176/**
1177 *  init_nic - Initialization of hardware
1178 *  @nic: device private variable
1179 *  Description: The function sequentially configures every block
1180 *  of the H/W from their reset values.
1181 *  Return Value:  SUCCESS on success and
1182 *  '-1' on failure (endian settings incorrect).
1183 */
1184
1185static int init_nic(struct s2io_nic *nic)
1186{
1187        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1188        struct net_device *dev = nic->dev;
1189        register u64 val64 = 0;
1190        void __iomem *add;
1191        u32 time;
1192        int i, j;
1193        int dtx_cnt = 0;
1194        unsigned long long mem_share;
1195        int mem_size;
1196        struct config_param *config = &nic->config;
1197        struct mac_info *mac_control = &nic->mac_control;
1198
1199        /* to set the swapper controle on the card */
1200        if (s2io_set_swapper(nic)) {
1201                DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1202                return -EIO;
1203        }
1204
1205        /*
1206         * Herc requires EOI to be removed from reset before XGXS, so..
1207         */
1208        if (nic->device_type & XFRAME_II_DEVICE) {
1209                val64 = 0xA500000000ULL;
1210                writeq(val64, &bar0->sw_reset);
1211                msleep(500);
1212                val64 = readq(&bar0->sw_reset);
1213        }
1214
1215        /* Remove XGXS from reset state */
1216        val64 = 0;
1217        writeq(val64, &bar0->sw_reset);
1218        msleep(500);
1219        val64 = readq(&bar0->sw_reset);
1220
1221        /* Ensure that it's safe to access registers by checking
1222         * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1223         */
1224        if (nic->device_type == XFRAME_II_DEVICE) {
1225                for (i = 0; i < 50; i++) {
1226                        val64 = readq(&bar0->adapter_status);
1227                        if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1228                                break;
1229                        msleep(10);
1230                }
1231                if (i == 50)
1232                        return -ENODEV;
1233        }
1234
1235        /*  Enable Receiving broadcasts */
1236        add = &bar0->mac_cfg;
1237        val64 = readq(&bar0->mac_cfg);
1238        val64 |= MAC_RMAC_BCAST_ENABLE;
1239        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1240        writel((u32)val64, add);
1241        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1242        writel((u32) (val64 >> 32), (add + 4));
1243
1244        /* Read registers in all blocks */
1245        val64 = readq(&bar0->mac_int_mask);
1246        val64 = readq(&bar0->mc_int_mask);
1247        val64 = readq(&bar0->xgxs_int_mask);
1248
1249        /*  Set MTU */
1250        val64 = dev->mtu;
1251        writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1252
1253        if (nic->device_type & XFRAME_II_DEVICE) {
1254                while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1255                        SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1256                                          &bar0->dtx_control, UF);
1257                        if (dtx_cnt & 0x1)
1258                                msleep(1); /* Necessary!! */
1259                        dtx_cnt++;
1260                }
1261        } else {
1262                while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1263                        SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1264                                          &bar0->dtx_control, UF);
1265                        val64 = readq(&bar0->dtx_control);
1266                        dtx_cnt++;
1267                }
1268        }
1269
1270        /*  Tx DMA Initialization */
1271        val64 = 0;
1272        writeq(val64, &bar0->tx_fifo_partition_0);
1273        writeq(val64, &bar0->tx_fifo_partition_1);
1274        writeq(val64, &bar0->tx_fifo_partition_2);
1275        writeq(val64, &bar0->tx_fifo_partition_3);
1276
1277        for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1278                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1279
1280                val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1281                        vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1282
1283                if (i == (config->tx_fifo_num - 1)) {
1284                        if (i % 2 == 0)
1285                                i++;
1286                }
1287
1288                switch (i) {
1289                case 1:
1290                        writeq(val64, &bar0->tx_fifo_partition_0);
1291                        val64 = 0;
1292                        j = 0;
1293                        break;
1294                case 3:
1295                        writeq(val64, &bar0->tx_fifo_partition_1);
1296                        val64 = 0;
1297                        j = 0;
1298                        break;
1299                case 5:
1300                        writeq(val64, &bar0->tx_fifo_partition_2);
1301                        val64 = 0;
1302                        j = 0;
1303                        break;
1304                case 7:
1305                        writeq(val64, &bar0->tx_fifo_partition_3);
1306                        val64 = 0;
1307                        j = 0;
1308                        break;
1309                default:
1310                        j++;
1311                        break;
1312                }
1313        }
1314
1315        /*
1316         * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1317         * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1318         */
1319        if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1320                writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1321
1322        val64 = readq(&bar0->tx_fifo_partition_0);
1323        DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1324                  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1325
1326        /*
1327         * Initialization of Tx_PA_CONFIG register to ignore packet
1328         * integrity checking.
1329         */
1330        val64 = readq(&bar0->tx_pa_cfg);
1331        val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1332                TX_PA_CFG_IGNORE_SNAP_OUI |
1333                TX_PA_CFG_IGNORE_LLC_CTRL |
1334                TX_PA_CFG_IGNORE_L2_ERR;
1335        writeq(val64, &bar0->tx_pa_cfg);
1336
1337        /* Rx DMA initialization. */
1338        val64 = 0;
1339        for (i = 0; i < config->rx_ring_num; i++) {
1340                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1341
1342                val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1343        }
1344        writeq(val64, &bar0->rx_queue_priority);
1345
1346        /*
1347         * Allocating equal share of memory to all the
1348         * configured Rings.
1349         */
1350        val64 = 0;
1351        if (nic->device_type & XFRAME_II_DEVICE)
1352                mem_size = 32;
1353        else
1354                mem_size = 64;
1355
1356        for (i = 0; i < config->rx_ring_num; i++) {
1357                switch (i) {
1358                case 0:
1359                        mem_share = (mem_size / config->rx_ring_num +
1360                                     mem_size % config->rx_ring_num);
1361                        val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1362                        continue;
1363                case 1:
1364                        mem_share = (mem_size / config->rx_ring_num);
1365                        val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1366                        continue;
1367                case 2:
1368                        mem_share = (mem_size / config->rx_ring_num);
1369                        val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1370                        continue;
1371                case 3:
1372                        mem_share = (mem_size / config->rx_ring_num);
1373                        val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1374                        continue;
1375                case 4:
1376                        mem_share = (mem_size / config->rx_ring_num);
1377                        val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1378                        continue;
1379                case 5:
1380                        mem_share = (mem_size / config->rx_ring_num);
1381                        val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1382                        continue;
1383                case 6:
1384                        mem_share = (mem_size / config->rx_ring_num);
1385                        val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1386                        continue;
1387                case 7:
1388                        mem_share = (mem_size / config->rx_ring_num);
1389                        val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1390                        continue;
1391                }
1392        }
1393        writeq(val64, &bar0->rx_queue_cfg);
1394
1395        /*
1396         * Filling Tx round robin registers
1397         * as per the number of FIFOs for equal scheduling priority
1398         */
1399        switch (config->tx_fifo_num) {
1400        case 1:
1401                val64 = 0x0;
1402                writeq(val64, &bar0->tx_w_round_robin_0);
1403                writeq(val64, &bar0->tx_w_round_robin_1);
1404                writeq(val64, &bar0->tx_w_round_robin_2);
1405                writeq(val64, &bar0->tx_w_round_robin_3);
1406                writeq(val64, &bar0->tx_w_round_robin_4);
1407                break;
1408        case 2:
1409                val64 = 0x0001000100010001ULL;
1410                writeq(val64, &bar0->tx_w_round_robin_0);
1411                writeq(val64, &bar0->tx_w_round_robin_1);
1412                writeq(val64, &bar0->tx_w_round_robin_2);
1413                writeq(val64, &bar0->tx_w_round_robin_3);
1414                val64 = 0x0001000100000000ULL;
1415                writeq(val64, &bar0->tx_w_round_robin_4);
1416                break;
1417        case 3:
1418                val64 = 0x0001020001020001ULL;
1419                writeq(val64, &bar0->tx_w_round_robin_0);
1420                val64 = 0x0200010200010200ULL;
1421                writeq(val64, &bar0->tx_w_round_robin_1);
1422                val64 = 0x0102000102000102ULL;
1423                writeq(val64, &bar0->tx_w_round_robin_2);
1424                val64 = 0x0001020001020001ULL;
1425                writeq(val64, &bar0->tx_w_round_robin_3);
1426                val64 = 0x0200010200000000ULL;
1427                writeq(val64, &bar0->tx_w_round_robin_4);
1428                break;
1429        case 4:
1430                val64 = 0x0001020300010203ULL;
1431                writeq(val64, &bar0->tx_w_round_robin_0);
1432                writeq(val64, &bar0->tx_w_round_robin_1);
1433                writeq(val64, &bar0->tx_w_round_robin_2);
1434                writeq(val64, &bar0->tx_w_round_robin_3);
1435                val64 = 0x0001020300000000ULL;
1436                writeq(val64, &bar0->tx_w_round_robin_4);
1437                break;
1438        case 5:
1439                val64 = 0x0001020304000102ULL;
1440                writeq(val64, &bar0->tx_w_round_robin_0);
1441                val64 = 0x0304000102030400ULL;
1442                writeq(val64, &bar0->tx_w_round_robin_1);
1443                val64 = 0x0102030400010203ULL;
1444                writeq(val64, &bar0->tx_w_round_robin_2);
1445                val64 = 0x0400010203040001ULL;
1446                writeq(val64, &bar0->tx_w_round_robin_3);
1447                val64 = 0x0203040000000000ULL;
1448                writeq(val64, &bar0->tx_w_round_robin_4);
1449                break;
1450        case 6:
1451                val64 = 0x0001020304050001ULL;
1452                writeq(val64, &bar0->tx_w_round_robin_0);
1453                val64 = 0x0203040500010203ULL;
1454                writeq(val64, &bar0->tx_w_round_robin_1);
1455                val64 = 0x0405000102030405ULL;
1456                writeq(val64, &bar0->tx_w_round_robin_2);
1457                val64 = 0x0001020304050001ULL;
1458                writeq(val64, &bar0->tx_w_round_robin_3);
1459                val64 = 0x0203040500000000ULL;
1460                writeq(val64, &bar0->tx_w_round_robin_4);
1461                break;
1462        case 7:
1463                val64 = 0x0001020304050600ULL;
1464                writeq(val64, &bar0->tx_w_round_robin_0);
1465                val64 = 0x0102030405060001ULL;
1466                writeq(val64, &bar0->tx_w_round_robin_1);
1467                val64 = 0x0203040506000102ULL;
1468                writeq(val64, &bar0->tx_w_round_robin_2);
1469                val64 = 0x0304050600010203ULL;
1470                writeq(val64, &bar0->tx_w_round_robin_3);
1471                val64 = 0x0405060000000000ULL;
1472                writeq(val64, &bar0->tx_w_round_robin_4);
1473                break;
1474        case 8:
1475                val64 = 0x0001020304050607ULL;
1476                writeq(val64, &bar0->tx_w_round_robin_0);
1477                writeq(val64, &bar0->tx_w_round_robin_1);
1478                writeq(val64, &bar0->tx_w_round_robin_2);
1479                writeq(val64, &bar0->tx_w_round_robin_3);
1480                val64 = 0x0001020300000000ULL;
1481                writeq(val64, &bar0->tx_w_round_robin_4);
1482                break;
1483        }
1484
1485        /* Enable all configured Tx FIFO partitions */
1486        val64 = readq(&bar0->tx_fifo_partition_0);
1487        val64 |= (TX_FIFO_PARTITION_EN);
1488        writeq(val64, &bar0->tx_fifo_partition_0);
1489
1490        /* Filling the Rx round robin registers as per the
1491         * number of Rings and steering based on QoS with
1492         * equal priority.
1493         */
1494        switch (config->rx_ring_num) {
1495        case 1:
1496                val64 = 0x0;
1497                writeq(val64, &bar0->rx_w_round_robin_0);
1498                writeq(val64, &bar0->rx_w_round_robin_1);
1499                writeq(val64, &bar0->rx_w_round_robin_2);
1500                writeq(val64, &bar0->rx_w_round_robin_3);
1501                writeq(val64, &bar0->rx_w_round_robin_4);
1502
1503                val64 = 0x8080808080808080ULL;
1504                writeq(val64, &bar0->rts_qos_steering);
1505                break;
1506        case 2:
1507                val64 = 0x0001000100010001ULL;
1508                writeq(val64, &bar0->rx_w_round_robin_0);
1509                writeq(val64, &bar0->rx_w_round_robin_1);
1510                writeq(val64, &bar0->rx_w_round_robin_2);
1511                writeq(val64, &bar0->rx_w_round_robin_3);
1512                val64 = 0x0001000100000000ULL;
1513                writeq(val64, &bar0->rx_w_round_robin_4);
1514
1515                val64 = 0x8080808040404040ULL;
1516                writeq(val64, &bar0->rts_qos_steering);
1517                break;
1518        case 3:
1519                val64 = 0x0001020001020001ULL;
1520                writeq(val64, &bar0->rx_w_round_robin_0);
1521                val64 = 0x0200010200010200ULL;
1522                writeq(val64, &bar0->rx_w_round_robin_1);
1523                val64 = 0x0102000102000102ULL;
1524                writeq(val64, &bar0->rx_w_round_robin_2);
1525                val64 = 0x0001020001020001ULL;
1526                writeq(val64, &bar0->rx_w_round_robin_3);
1527                val64 = 0x0200010200000000ULL;
1528                writeq(val64, &bar0->rx_w_round_robin_4);
1529
1530                val64 = 0x8080804040402020ULL;
1531                writeq(val64, &bar0->rts_qos_steering);
1532                break;
1533        case 4:
1534                val64 = 0x0001020300010203ULL;
1535                writeq(val64, &bar0->rx_w_round_robin_0);
1536                writeq(val64, &bar0->rx_w_round_robin_1);
1537                writeq(val64, &bar0->rx_w_round_robin_2);
1538                writeq(val64, &bar0->rx_w_round_robin_3);
1539                val64 = 0x0001020300000000ULL;
1540                writeq(val64, &bar0->rx_w_round_robin_4);
1541
1542                val64 = 0x8080404020201010ULL;
1543                writeq(val64, &bar0->rts_qos_steering);
1544                break;
1545        case 5:
1546                val64 = 0x0001020304000102ULL;
1547                writeq(val64, &bar0->rx_w_round_robin_0);
1548                val64 = 0x0304000102030400ULL;
1549                writeq(val64, &bar0->rx_w_round_robin_1);
1550                val64 = 0x0102030400010203ULL;
1551                writeq(val64, &bar0->rx_w_round_robin_2);
1552                val64 = 0x0400010203040001ULL;
1553                writeq(val64, &bar0->rx_w_round_robin_3);
1554                val64 = 0x0203040000000000ULL;
1555                writeq(val64, &bar0->rx_w_round_robin_4);
1556
1557                val64 = 0x8080404020201008ULL;
1558                writeq(val64, &bar0->rts_qos_steering);
1559                break;
1560        case 6:
1561                val64 = 0x0001020304050001ULL;
1562                writeq(val64, &bar0->rx_w_round_robin_0);
1563                val64 = 0x0203040500010203ULL;
1564                writeq(val64, &bar0->rx_w_round_robin_1);
1565                val64 = 0x0405000102030405ULL;
1566                writeq(val64, &bar0->rx_w_round_robin_2);
1567                val64 = 0x0001020304050001ULL;
1568                writeq(val64, &bar0->rx_w_round_robin_3);
1569                val64 = 0x0203040500000000ULL;
1570                writeq(val64, &bar0->rx_w_round_robin_4);
1571
1572                val64 = 0x8080404020100804ULL;
1573                writeq(val64, &bar0->rts_qos_steering);
1574                break;
1575        case 7:
1576                val64 = 0x0001020304050600ULL;
1577                writeq(val64, &bar0->rx_w_round_robin_0);
1578                val64 = 0x0102030405060001ULL;
1579                writeq(val64, &bar0->rx_w_round_robin_1);
1580                val64 = 0x0203040506000102ULL;
1581                writeq(val64, &bar0->rx_w_round_robin_2);
1582                val64 = 0x0304050600010203ULL;
1583                writeq(val64, &bar0->rx_w_round_robin_3);
1584                val64 = 0x0405060000000000ULL;
1585                writeq(val64, &bar0->rx_w_round_robin_4);
1586
1587                val64 = 0x8080402010080402ULL;
1588                writeq(val64, &bar0->rts_qos_steering);
1589                break;
1590        case 8:
1591                val64 = 0x0001020304050607ULL;
1592                writeq(val64, &bar0->rx_w_round_robin_0);
1593                writeq(val64, &bar0->rx_w_round_robin_1);
1594                writeq(val64, &bar0->rx_w_round_robin_2);
1595                writeq(val64, &bar0->rx_w_round_robin_3);
1596                val64 = 0x0001020300000000ULL;
1597                writeq(val64, &bar0->rx_w_round_robin_4);
1598
1599                val64 = 0x8040201008040201ULL;
1600                writeq(val64, &bar0->rts_qos_steering);
1601                break;
1602        }
1603
1604        /* UDP Fix */
1605        val64 = 0;
1606        for (i = 0; i < 8; i++)
1607                writeq(val64, &bar0->rts_frm_len_n[i]);
1608
1609        /* Set the default rts frame length for the rings configured */
1610        val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1611        for (i = 0 ; i < config->rx_ring_num ; i++)
1612                writeq(val64, &bar0->rts_frm_len_n[i]);
1613
1614        /* Set the frame length for the configured rings
1615         * desired by the user
1616         */
1617        for (i = 0; i < config->rx_ring_num; i++) {
1618                /* If rts_frm_len[i] == 0 then it is assumed that user not
1619                 * specified frame length steering.
1620                 * If the user provides the frame length then program
1621                 * the rts_frm_len register for those values or else
1622                 * leave it as it is.
1623                 */
1624                if (rts_frm_len[i] != 0) {
1625                        writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1626                               &bar0->rts_frm_len_n[i]);
1627                }
1628        }
1629
1630        /* Disable differentiated services steering logic */
1631        for (i = 0; i < 64; i++) {
1632                if (rts_ds_steer(nic, i, 0) == FAILURE) {
1633                        DBG_PRINT(ERR_DBG,
1634                                  "%s: rts_ds_steer failed on codepoint %d\n",
1635                                  dev->name, i);
1636                        return -ENODEV;
1637                }
1638        }
1639
1640        /* Program statistics memory */
1641        writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1642
1643        if (nic->device_type == XFRAME_II_DEVICE) {
1644                val64 = STAT_BC(0x320);
1645                writeq(val64, &bar0->stat_byte_cnt);
1646        }
1647
1648        /*
1649         * Initializing the sampling rate for the device to calculate the
1650         * bandwidth utilization.
1651         */
1652        val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1653                MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1654        writeq(val64, &bar0->mac_link_util);
1655
1656        /*
1657         * Initializing the Transmit and Receive Traffic Interrupt
1658         * Scheme.
1659         */
1660
1661        /* Initialize TTI */
1662        if (SUCCESS != init_tti(nic, nic->last_link_state))
1663                return -ENODEV;
1664
1665        /* RTI Initialization */
1666        if (nic->device_type == XFRAME_II_DEVICE) {
1667                /*
1668                 * Programmed to generate Apprx 500 Intrs per
1669                 * second
1670                 */
1671                int count = (nic->config.bus_speed * 125)/4;
1672                val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1673        } else
1674                val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1675        val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1676                RTI_DATA1_MEM_RX_URNG_B(0x10) |
1677                RTI_DATA1_MEM_RX_URNG_C(0x30) |
1678                RTI_DATA1_MEM_RX_TIMER_AC_EN;
1679
1680        writeq(val64, &bar0->rti_data1_mem);
1681
1682        val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1683                RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1684        if (nic->config.intr_type == MSI_X)
1685                val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1686                          RTI_DATA2_MEM_RX_UFC_D(0x40));
1687        else
1688                val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1689                          RTI_DATA2_MEM_RX_UFC_D(0x80));
1690        writeq(val64, &bar0->rti_data2_mem);
1691
1692        for (i = 0; i < config->rx_ring_num; i++) {
1693                val64 = RTI_CMD_MEM_WE |
1694                        RTI_CMD_MEM_STROBE_NEW_CMD |
1695                        RTI_CMD_MEM_OFFSET(i);
1696                writeq(val64, &bar0->rti_command_mem);
1697
1698                /*
1699                 * Once the operation completes, the Strobe bit of the
1700                 * command register will be reset. We poll for this
1701                 * particular condition. We wait for a maximum of 500ms
1702                 * for the operation to complete, if it's not complete
1703                 * by then we return error.
1704                 */
1705                time = 0;
1706                while (true) {
1707                        val64 = readq(&bar0->rti_command_mem);
1708                        if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1709                                break;
1710
1711                        if (time > 10) {
1712                                DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1713                                          dev->name);
1714                                return -ENODEV;
1715                        }
1716                        time++;
1717                        msleep(50);
1718                }
1719        }
1720
1721        /*
1722         * Initializing proper values as Pause threshold into all
1723         * the 8 Queues on Rx side.
1724         */
1725        writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1726        writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1727
1728        /* Disable RMAC PAD STRIPPING */
1729        add = &bar0->mac_cfg;
1730        val64 = readq(&bar0->mac_cfg);
1731        val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1732        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1733        writel((u32) (val64), add);
1734        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1735        writel((u32) (val64 >> 32), (add + 4));
1736        val64 = readq(&bar0->mac_cfg);
1737
1738        /* Enable FCS stripping by adapter */
1739        add = &bar0->mac_cfg;
1740        val64 = readq(&bar0->mac_cfg);
1741        val64 |= MAC_CFG_RMAC_STRIP_FCS;
1742        if (nic->device_type == XFRAME_II_DEVICE)
1743                writeq(val64, &bar0->mac_cfg);
1744        else {
1745                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1746                writel((u32) (val64), add);
1747                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1748                writel((u32) (val64 >> 32), (add + 4));
1749        }
1750
1751        /*
1752         * Set the time value to be inserted in the pause frame
1753         * generated by xena.
1754         */
1755        val64 = readq(&bar0->rmac_pause_cfg);
1756        val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1757        val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1758        writeq(val64, &bar0->rmac_pause_cfg);
1759
1760        /*
1761         * Set the Threshold Limit for Generating the pause frame
1762         * If the amount of data in any Queue exceeds ratio of
1763         * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1764         * pause frame is generated
1765         */
1766        val64 = 0;
1767        for (i = 0; i < 4; i++) {
1768                val64 |= (((u64)0xFF00 |
1769                           nic->mac_control.mc_pause_threshold_q0q3)
1770                          << (i * 2 * 8));
1771        }
1772        writeq(val64, &bar0->mc_pause_thresh_q0q3);
1773
1774        val64 = 0;
1775        for (i = 0; i < 4; i++) {
1776                val64 |= (((u64)0xFF00 |
1777                           nic->mac_control.mc_pause_threshold_q4q7)
1778                          << (i * 2 * 8));
1779        }
1780        writeq(val64, &bar0->mc_pause_thresh_q4q7);
1781
1782        /*
1783         * TxDMA will stop Read request if the number of read split has
1784         * exceeded the limit pointed by shared_splits
1785         */
1786        val64 = readq(&bar0->pic_control);
1787        val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1788        writeq(val64, &bar0->pic_control);
1789
1790        if (nic->config.bus_speed == 266) {
1791                writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1792                writeq(0x0, &bar0->read_retry_delay);
1793                writeq(0x0, &bar0->write_retry_delay);
1794        }
1795
1796        /*
1797         * Programming the Herc to split every write transaction
1798         * that does not start on an ADB to reduce disconnects.
1799         */
1800        if (nic->device_type == XFRAME_II_DEVICE) {
1801                val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1802                        MISC_LINK_STABILITY_PRD(3);
1803                writeq(val64, &bar0->misc_control);
1804                val64 = readq(&bar0->pic_control2);
1805                val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1806                writeq(val64, &bar0->pic_control2);
1807        }
1808        if (strstr(nic->product_name, "CX4")) {
1809                val64 = TMAC_AVG_IPG(0x17);
1810                writeq(val64, &bar0->tmac_avg_ipg);
1811        }
1812
1813        return SUCCESS;
1814}
1815#define LINK_UP_DOWN_INTERRUPT          1
1816#define MAC_RMAC_ERR_TIMER              2
1817
1818static int s2io_link_fault_indication(struct s2io_nic *nic)
1819{
1820        if (nic->device_type == XFRAME_II_DEVICE)
1821                return LINK_UP_DOWN_INTERRUPT;
1822        else
1823                return MAC_RMAC_ERR_TIMER;
1824}
1825
1826/**
1827 *  do_s2io_write_bits -  update alarm bits in alarm register
1828 *  @value: alarm bits
1829 *  @flag: interrupt status
1830 *  @addr: address value
1831 *  Description: update alarm bits in alarm register
1832 *  Return Value:
1833 *  NONE.
1834 */
1835static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1836{
1837        u64 temp64;
1838
1839        temp64 = readq(addr);
1840
1841        if (flag == ENABLE_INTRS)
1842                temp64 &= ~((u64)value);
1843        else
1844                temp64 |= ((u64)value);
1845        writeq(temp64, addr);
1846}
1847
/**
 *  en_dis_err_alarms - mask or unmask per-block error alarm interrupts
 *  @nic: device private variable
 *  @mask: bitmask of interrupt blocks to act on (TX_DMA_INTR, TX_MAC_INTR,
 *         TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR, RX_XGXS_INTR, MC_INTR)
 *  @flag: ENABLE_INTRS to unmask the alarm sources, otherwise mask them
 *  Description: for every block selected in @mask, update that block's
 *  error-mask registers via do_s2io_write_bits() and accumulate the
 *  block's top-level interrupt bit into nic->general_int_mask.
 *  Return Value:
 *  NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Quiesce all top-level interrupts while the per-block masks
	 * are being rewritten.
	 */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		/* Tx DMA sub-blocks: TDA, PFC, PCC, TTI, LSO, TPA, SM */
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		/* Tx MAC (TMAC) buffer, state-machine and ECC alarms */
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		/* Tx XGXS (serdes-side) alarms */
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		/* Rx DMA sub-blocks: RC, PRC, RPA, RDA, RTI */
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/* Link-state changes are only alarm-driven when the device
		 * uses the RMAC error timer for link fault indication.
		 */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		/* Rx XGXS (serdes-side) alarms */
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		/* Memory controller ECC / PLL alarms */
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
1974
/**
 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable,
 *  @mask: A mask indicating which Intr block must be modified and,
 *  @flag: A flag indicating whether to enable or disable the Intrs.
 *  Description: This function will either disable or enable the interrupts
 *  depending on the flag argument. The mask argument can be used to
 *  enable/disable any Intr block.
 *  Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm-block bits accumulated by
	 * en_dis_err_alarms() so they are preserved in the final mask.
	 */
	intr_mask = nic->general_int_mask;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/*  Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				/* Only the GPIO link-up source is unmasked */
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* Apply the accumulated mask: clearing bits enables the blocks;
	 * any other flag masks everything at the top level.
	 */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the mask value the hardware actually holds */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2064
2065/**
2066 *  verify_pcc_quiescent- Checks for PCC quiescent state
2067 *  Return: 1 If PCC is quiescence
2068 *          0 If PCC is not quiescence
2069 */
2070static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2071{
2072        int ret = 0, herc;
2073        struct XENA_dev_config __iomem *bar0 = sp->bar0;
2074        u64 val64 = readq(&bar0->adapter_status);
2075
2076        herc = (sp->device_type == XFRAME_II_DEVICE);
2077
2078        if (flag == false) {
2079                if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2080                        if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2081                                ret = 1;
2082                } else {
2083                        if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2084                                ret = 1;
2085                }
2086        } else {
2087                if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2088                        if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2089                             ADAPTER_STATUS_RMAC_PCC_IDLE))
2090                                ret = 1;
2091                } else {
2092                        if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2093                             ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2094                                ret = 1;
2095                }
2096        }
2097
2098        return ret;
2099}
2100/**
2101 *  verify_xena_quiescence - Checks whether the H/W is ready
2102 *  Description: Returns whether the H/W is ready to go or not. Depending
2103 *  on whether adapter enable bit was written or not the comparison
2104 *  differs and the calling function passes the input argument flag to
2105 *  indicate this.
2106 *  Return: 1 If xena is quiescence
2107 *          0 If Xena is not quiescence
2108 */
2109
2110static int verify_xena_quiescence(struct s2io_nic *sp)
2111{
2112        int  mode;
2113        struct XENA_dev_config __iomem *bar0 = sp->bar0;
2114        u64 val64 = readq(&bar0->adapter_status);
2115        mode = s2io_verify_pci_mode(sp);
2116
2117        if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2118                DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2119                return 0;
2120        }
2121        if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2122                DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2123                return 0;
2124        }
2125        if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2126                DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2127                return 0;
2128        }
2129        if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2130                DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2131                return 0;
2132        }
2133        if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2134                DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2135                return 0;
2136        }
2137        if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2138                DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2139                return 0;
2140        }
2141        if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2142                DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2143                return 0;
2144        }
2145        if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2146                DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2147                return 0;
2148        }
2149
2150        /*
2151         * In PCI 33 mode, the P_PLL is not used, and therefore,
2152         * the the P_PLL_LOCK bit in the adapter_status register will
2153         * not be asserted.
2154         */
2155        if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2156            sp->device_type == XFRAME_II_DEVICE &&
2157            mode != PCI_MODE_PCI_33) {
2158                DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2159                return 0;
2160        }
2161        if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2162              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2163                DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2164                return 0;
2165        }
2166        return 1;
2167}
2168
2169/**
2170 * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2171 * @sp: Pointer to device specifc structure
2172 * Description :
2173 * New procedure to clear mac address reading  problems on Alpha platforms
2174 *
2175 */
2176
2177static void fix_mac_address(struct s2io_nic *sp)
2178{
2179        struct XENA_dev_config __iomem *bar0 = sp->bar0;
2180        int i = 0;
2181
2182        while (fix_mac[i] != END_SIGN) {
2183                writeq(fix_mac[i++], &bar0->gpio_control);
2184                udelay(10);
2185                (void) readq(&bar0->gpio_control);
2186        }
2187}
2188
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this  function is
 *  called,all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the PRC of this ring at its first Rx block */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Reprogram the RxD backoff interval field */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the vlan_tag_strip module parameter */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the code CLEARS ADAPTER_ECC_EN here, which reads as
	 * disabling rather than enabling ECC — confirm bit polarity against
	 * the Xframe register specification before changing either.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED programming value written at raw offset 0x2700 */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo this descriptor list belongs to.
 * @txdlp: pointer to the first TxD of the descriptor list.
 * @get_off: offset of the list within the fifo (not used in this function).
 * Description:
 * Unmaps every DMA buffer referenced by the Tx descriptor list (the
 * optional UFO in-band descriptor, the linear skb data and each page
 * fragment), zeroes the whole list and returns the attached skb.
 * Return Value:
 * The skb that was queued on the list, or NULL if none was attached.
 * The caller owns (and must free) the returned skb.
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* Skip over the leading UFO in-band descriptor, if one was queued */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
				 sizeof(u64), PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		/* No skb attached: just wipe the descriptor list */
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear (head) portion of the skb */
	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		/* One descriptor per page fragment follows the linear one */
		for (j = 0; j < frg_cnt; j++, txds++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev,
				       (dma_addr_t)txds->Buffer_Pointer,
				       skb_frag_size(frag), PCI_DMA_TODEVICE);
		}
	}
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
2346
2347/**
2348 *  free_tx_buffers - Free all queued Tx buffers
2349 *  @nic : device private variable.
2350 *  Description:
2351 *  Free all queued Tx buffers.
2352 *  Return Value: void
2353 */
2354
2355static void free_tx_buffers(struct s2io_nic *nic)
2356{
2357        struct net_device *dev = nic->dev;
2358        struct sk_buff *skb;
2359        struct TxD *txdp;
2360        int i, j;
2361        int cnt = 0;
2362        struct config_param *config = &nic->config;
2363        struct mac_info *mac_control = &nic->mac_control;
2364        struct stat_block *stats = mac_control->stats_info;
2365        struct swStat *swstats = &stats->sw_stat;
2366
2367        for (i = 0; i < config->tx_fifo_num; i++) {
2368                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2369                struct fifo_info *fifo = &mac_control->fifos[i];
2370                unsigned long flags;
2371
2372                spin_lock_irqsave(&fifo->tx_lock, flags);
2373                for (j = 0; j < tx_cfg->fifo_len; j++) {
2374                        txdp = fifo->list_info[j].list_virt_addr;
2375                        skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2376                        if (skb) {
2377                                swstats->mem_freed += skb->truesize;
2378                                dev_kfree_skb(skb);
2379                                cnt++;
2380                        }
2381                }
2382                DBG_PRINT(INTR_DBG,
2383                          "%s: forcibly freeing %d skbs on FIFO%d\n",
2384                          dev->name, cnt, i);
2385                fifo->tx_curr_get_info.offset = 0;
2386                fifo->tx_curr_put_info.offset = 0;
2387                spin_unlock_irqrestore(&fifo->tx_lock, flags);
2388        }
2389}
2390
/**
 *   stop_nic -  To stop the nic
 *   @nic : device private variable.
 *   Description:
 *   This function does exactly the opposite of what the start_nic()
 *   function does. This function is called to stop the device.
 *   Return Value:
 *   void.
 */

static void stop_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	u16 interruptible;

	/*  Disable all interrupts */
	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
	/* Mask traffic and PIC interrupt sources before stopping the NIC */
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
	interruptible |= TX_PIC_INTR;
	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~(ADAPTER_CNTL_EN);
	writeq(val64, &bar0->adapter_control);
}
2418
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic: device private variable.
 *  @ring: per ring structure.
 *  @from_card_up: If this is true, we will map the buffer to get
 *     the dma address for buf0 and buf1 to give it to the card.
 *     Else we will sync the already mapped buffer to give it to the card.
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of descriptors that still need fresh buffers */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* Put pointer caught up with get pointer on a still-owned
		 * descriptor: the ring is full, stop refilling.
		 */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* End of block reached: advance (and possibly wrap) the
		 * put block index and restart at offset 0 of the new block.
		 */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the adapter — nothing to do */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = netdev_alloc_skb(nic->dev, size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Hand the already-filled descriptors to the NIC
			 * before bailing out, barrier first.
			 */
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				pci_map_single(ring->pdev, skb->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(nic->pdev,
						  rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE boundary */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					pci_map_single(ring->pdev, ba->ba_0,
						       BUF0_LEN,
						       PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
							       (dma_addr_t)rxdp3->Buffer0_ptr,
							       BUF0_LEN,
							       PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
								    skb->data,
								    ring->mtu + 4,
								    PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
							       ba->ba_1,
							       BUF1_LEN,
							       PCI_DMA_FROMDEVICE);

					if (pci_dma_mapping_error(nic->pdev,
								  rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping
						 * made just above.
						 */
						pci_unmap_single(ring->pdev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Descriptors not on a sync boundary are handed over
		 * immediately; boundary ones are batched via first_rxdp.
		 */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		dma_wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2650
/**
 * free_rxd_blk - Unmap and free every skb queued in one Rx block
 * @sp: device private variable.
 * @ring_no: index of the ring the block belongs to.
 * @blk: index of the block within the ring.
 * Description:
 * Walks every descriptor of the given block, unmaps its DMA buffers
 * according to the ring's RxD mode, frees the attached skb and updates
 * the software stats and the ring's rx_bufs_left count.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
			rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (!skb)
			continue;	/* descriptor has no buffer queued */
		if (sp->rxd_mode == RXD_MODE_1) {
			/* Single-buffer mode: one mapping covering the
			 * whole frame.
			 */
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			/* Two-buffer mode: three separate mappings */
			rxdp3 = (struct RxD3 *)rxdp;
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer0_ptr,
					 BUF0_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer1_ptr,
					 BUF1_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 dev->mtu + 4,
					 PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
2699
2700/**
2701 *  free_rx_buffers - Frees all Rx buffers
2702 *  @sp: device private variable.
2703 *  Description:
2704 *  This function will free all Rx buffers allocated by host.
2705 *  Return Value:
2706 *  NONE.
2707 */
2708
2709static void free_rx_buffers(struct s2io_nic *sp)
2710{
2711        struct net_device *dev = sp->dev;
2712        int i, blk = 0, buf_cnt = 0;
2713        struct config_param *config = &sp->config;
2714        struct mac_info *mac_control = &sp->mac_control;
2715
2716        for (i = 0; i < config->rx_ring_num; i++) {
2717                struct ring_info *ring = &mac_control->rings[i];
2718
2719                for (blk = 0; blk < rx_ring_sz[i]; blk++)
2720                        free_rxd_blk(sp, i, blk);
2721
2722                ring->rx_curr_put_info.block_index = 0;
2723                ring->rx_curr_get_info.block_index = 0;
2724                ring->rx_curr_put_info.offset = 0;
2725                ring->rx_curr_get_info.offset = 0;
2726                ring->rx_bufs_left = 0;
2727                DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2728                          dev->name, buf_cnt, i);
2729        }
2730}
2731
2732static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2733{
2734        if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2735                DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2736                          ring->dev->name);
2737        }
2738        return 0;
2739}
2740
/**
 * s2io_poll_msix - Rx interrupt handler for NAPI support (MSI-X, per ring)
 * @napi : pointer to the napi structure.
 * @budget : The number of packets that were budgeted to be processed
 * during  one pass through the 'Poll" function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in a interrupt context
 * also It will process only a given number of packets.
 * Return value:
 * No. of packets processed (0 if the card is down or nothing was pending).
 */

static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	/* Replenish the buffers consumed above */
	s2io_chk_rx_buffers(nic, ring);

	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);
		/*Re Enable MSI-Rx Vector*/
		/* One mask byte per ring inside xmsi_mask_reg; ring 0 uses
		 * a different unmask value than the others.
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		val8 = readb(addr);	/* flush the posted write */
	}
	return pkts_processed;
}
2782
/**
 * s2io_poll_inta - Rx NAPI poll handler for the shared (INTA) interrupt case
 * @napi: napi structure embedded in the device private structure.
 * @budget: maximum number of packets to process in this pass.
 * Description:
 * Walks every Rx ring, processing packets and replenishing buffers,
 * until the budget is exhausted. If fewer packets than the budget were
 * processed, completes NAPI and re-enables Rx traffic interrupts.
 * Return value:
 * Total number of packets processed across all rings.
 */
static int s2io_poll_inta(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	int pkts_processed = 0;
	int ring_pkts_processed, i;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];
		ring_pkts_processed = rx_intr_handler(ring, budget);
		s2io_chk_rx_buffers(nic, ring);
		pkts_processed += ring_pkts_processed;
		budget -= ring_pkts_processed;
		if (budget <= 0)
			break;
	}
	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);
		/* Re enable the Rx interrupts for the ring */
		writeq(0, &bar0->rx_traffic_mask);
		readl(&bar0->rx_traffic_mask);	/* flush the posted write */
	}
	return pkts_processed;
}
2813
2814#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 *      This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	const int irq = nic->pdev->irq;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(irq);

	/* Acknowledge/clear all pending Rx and Tx traffic interrupts */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		rx_intr_handler(ring, 0);
	}

	/* Refill the rings with fresh receive buffers */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(irq);
}
2868#endif
2869
2870/**
2871 *  rx_intr_handler - Rx interrupt handler
2872 *  @ring_info: per ring structure.
2873 *  @budget: budget for napi processing.
2874 *  Description:
2875 *  If the interrupt is because of a received frame or if the
2876 *  receive ring contains fresh as yet un-processed frames,this function is
2877 *  called. It picks out the RxD at which place the last Rx processing had
2878 *  stopped and sends the skb to the OSM's Rx handler and then increments
2879 *  the offset.
2880 *  Return Value:
2881 *  No. of napi packets processed.
2882 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	/* No NAPI budget left - nothing to process. */
	if (budget <= 0)
		return napi_pkts;

	/* Snapshot the driver's (get) and NIC's (put) ring positions. */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Walk descriptors the NIC has handed back to the host. */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If you are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		/* The skb pointer was stashed in Host_Control at refill time. */
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
				  ring_data->dev->name);
			return 0;
		}
		/* Unmap (or sync) the DMA buffers according to the RxD mode. */
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
					 rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3 *)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev,
						    (dma_addr_t)rxdp3->Buffer0_ptr,
						    BUF0_LEN,
						    PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		/* Hand the completed frame to the upper-layer receive handler. */
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		/* Wrap to the next block once this one is fully consumed. */
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* Only count against the budget when running under NAPI. */
		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
2979
2980/**
2981 *  tx_intr_handler - Transmit interrupt handler
2982 *  @nic : device private variable
2983 *  Description:
2984 *  If an interrupt was raised to indicate DMA complete of the
2985 *  Tx packet, this function is called. It identifies the last TxD
2986 *  whose buffer was freed and frees all skbs whose data have already
2987 *  DMA'ed into the NICs internal memory.
2988 *  Return Value:
2989 *  NONE
2990 */
2991
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Bail out if another context is already reaping this FIFO. */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
	/*
	 * Reap TxDs that the NIC has released back to the host (ownership
	 * bit clear), stopping when we catch up with the put index.
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				swstats->parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch (err_mask) {
			case 2:
				swstats->tx_buf_abort_cnt++;
				break;

			case 3:
				swstats->tx_desc_abort_cnt++;
				break;

			case 7:
				swstats->tx_parity_err_cnt++;
				break;

			case 10:
				swstats->tx_link_loss_cnt++;
				break;

			case 15:
				swstats->tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Retrieve and free the skb attached to this TxD list. */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
				  __func__);
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the get index, wrapping at the end of the FIFO. */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset = get_info.offset;
	}

	/* Descriptors were freed; restart the queue if it was stopped. */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3070
3071/**
3072 *  s2io_mdio_write - Function to write in to MDIO registers
3073 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3074 *  @addr     : address value
3075 *  @value    : data value
3076 *  @dev      : pointer to net_device structure
3077 *  Description:
3078 *  This function is used to write values to the MDIO registers
3079 *  NONE
3080 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
			    struct net_device *dev)
{
	u64 val64;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	/* Kick off the transaction and give the MDIO bus time to settle. */
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_MDIO_DATA(value) |
		MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/*
	 * Follow-up read transaction; the result is not consumed here
	 * (presumably issued to complete/verify the write cycle -
	 * NOTE(review): confirm against the Xframe register spec).
	 */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}
3117
3118/**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers
 *  Return value: the 16-bit value read from the addressed register
3126 */
3127static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3128{
3129        u64 val64 = 0x0;
3130        u64 rval64 = 0x0;
3131        struct s2io_nic *sp = netdev_priv(dev);
3132        struct XENA_dev_config __iomem *bar0 = sp->bar0;
3133
3134        /* address transaction */
3135        val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3136                         | MDIO_MMD_DEV_ADDR(mmd_type)
3137                         | MDIO_MMS_PRT_ADDR(0x0));
3138        writeq(val64, &bar0->mdio_control);
3139        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3140        writeq(val64, &bar0->mdio_control);
3141        udelay(100);
3142
3143        /* Data transaction */
3144        val64 = MDIO_MMD_INDX_ADDR(addr) |
3145                MDIO_MMD_DEV_ADDR(mmd_type) |
3146                MDIO_MMS_PRT_ADDR(0x0) |
3147                MDIO_OP(MDIO_OP_READ_TRANS);
3148        writeq(val64, &bar0->mdio_control);
3149        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3150        writeq(val64, &bar0->mdio_control);
3151        udelay(100);
3152
3153        /* Read the value from regs */
3154        rval64 = readq(&bar0->mdio_control);
3155        rval64 = rval64 & 0xFFFF0000;
3156        rval64 = rval64 >> 16;
3157        return rval64;
3158}
3159
3160/**
 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
 *  @regs_stat    : aggregate of per-counter 2-bit status fields
 *  @index        : index of this counter's 2-bit field within @regs_stat
 *  @flag         : flag to indicate the status
 *  @type         : counter type
 *  Description:
 *  This function is to check the status of the xpak counters value
 *  NONE
3168 */
3169
3170static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3171                                  u16 flag, u16 type)
3172{
3173        u64 mask = 0x3;
3174        u64 val64;
3175        int i;
3176        for (i = 0; i < index; i++)
3177                mask = mask << 0x2;
3178
3179        if (flag > 0) {
3180                *counter = *counter + 1;
3181                val64 = *regs_stat & mask;
3182                val64 = val64 >> (index * 0x2);
3183                val64 = val64 + 1;
3184                if (val64 == 3) {
3185                        switch (type) {
3186                        case 1:
3187                                DBG_PRINT(ERR_DBG,
3188                                          "Take Xframe NIC out of service.\n");
3189                                DBG_PRINT(ERR_DBG,
3190"Excessive temperatures may result in premature transceiver failure.\n");
3191                                break;
3192                        case 2:
3193                                DBG_PRINT(ERR_DBG,
3194                                          "Take Xframe NIC out of service.\n");
3195                                DBG_PRINT(ERR_DBG,
3196"Excessive bias currents may indicate imminent laser diode failure.\n");
3197                                break;
3198                        case 3:
3199                                DBG_PRINT(ERR_DBG,
3200                                          "Take Xframe NIC out of service.\n");
3201                                DBG_PRINT(ERR_DBG,
3202"Excessive laser output power may saturate far-end receiver.\n");
3203                                break;
3204                        default:
3205                                DBG_PRINT(ERR_DBG,
3206                                          "Incorrect XPAK Alarm type\n");
3207                        }
3208                        val64 = 0x0;
3209                }
3210                val64 = val64 << (index * 0x2);
3211                *regs_stat = (*regs_stat & (~mask)) | (val64);
3212
3213        } else {
3214                *regs_stat = *regs_stat & (~mask);
3215        }
3216}
3217
3218/**
3219 *  s2io_updt_xpak_counter - Function to update the xpak counters
3220 *  @dev         : pointer to net_device struct
3221 *  Description:
 *  This function is to update the status of the xpak counters value
3223 *  NONE
3224 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Check the communication with the MDIO slave */
	addr = MDIO_CTRL1;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
	/* All-ones or all-zeroes means the slave did not respond. */
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG,
			  "ERR: MDIO slave access failed - Returned %llx\n",
			  (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of control reg 1 */
	if (val64 != MDIO_CTRL1_SPEED10G) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
			  "Returned: %llx- Expected: 0x%x\n",
			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/*
	 * High-side alarms go through s2io_chk_xpak_counter() so repeated
	 * assertions can escalate to a "take NIC out of service" warning;
	 * low-side alarms are simply counted.
	 */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
			      &xstats->xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		xstats->alarm_transceiver_temp_low++;

	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
			      &xstats->xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		xstats->alarm_laser_bias_current_low++;

	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
			      &xstats->xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		xstats->alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Warnings are informational: count every asserted bit. */
	if (CHECKBIT(val64, 0x7))
		xstats->warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		xstats->warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		xstats->warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		xstats->warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		xstats->warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		xstats->warn_laser_output_power_low++;
}
3316
3317/**
3318 *  wait_for_cmd_complete - waits for a command to complete.
3319 *  @sp : private member of the device structure, which is a pointer to the
3320 *  s2io_nic structure.
3321 *  Description: Function that waits for a command to Write into RMAC
3322 *  ADDR DATA registers to be completed and returns either success or
3323 *  error depending on whether the command was complete or not.
3324 *  Return value:
3325 *   SUCCESS on success and FAILURE on failure.
3326 */
3327
3328static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3329                                 int bit_state)
3330{
3331        int ret = FAILURE, cnt = 0, delay = 1;
3332        u64 val64;
3333
3334        if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3335                return FAILURE;
3336
3337        do {
3338                val64 = readq(addr);
3339                if (bit_state == S2IO_BIT_RESET) {
3340                        if (!(val64 & busy_bit)) {
3341                                ret = SUCCESS;
3342                                break;
3343                        }
3344                } else {
3345                        if (val64 & busy_bit) {
3346                                ret = SUCCESS;
3347                                break;
3348                        }
3349                }
3350
3351                if (in_interrupt())
3352                        mdelay(delay);
3353                else
3354                        msleep(delay);
3355
3356                if (++cnt >= 10)
3357                        delay = 50;
3358        } while (cnt < 20);
3359        return ret;
3360}
3361/**
3362 * check_pci_device_id - Checks if the device id is supported
3363 * @id : device id
3364 * Description: Function to check if the pci device id is supported by driver.
3365 * Return value: Actual device id if supported else PCI_ANY_ID
3366 */
3367static u16 check_pci_device_id(u16 id)
3368{
3369        switch (id) {
3370        case PCI_DEVICE_ID_HERC_WIN:
3371        case PCI_DEVICE_ID_HERC_UNI:
3372                return XFRAME_II_DEVICE;
3373        case PCI_DEVICE_ID_S2IO_UNI:
3374        case PCI_DEVICE_ID_S2IO_WIN:
3375                return XFRAME_I_DEVICE;
3376        default:
3377                return PCI_ANY_ID;
3378        }
3379}
3380
3381/**
3382 *  s2io_reset - Resets the card.
3383 *  @sp : private member of the device structure.
3384 *  Description: Function to Reset the card. This function then also
3385 *  restores the previously saved PCI configuration space registers as
3386 *  the card reset also resets the configuration space.
3387 *  Return value:
3388 *  void.
3389 */
3390
static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Issue the software reset; CX4 boards get extra settle time. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		/*
		 * Config offset 0x2 is the PCI device id; a recognized
		 * value means config space is accessible again post-reset.
		 */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	/* Put back the PCI-X command register saved above. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	/* Wipe the whole hardware stats block... */
	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3503
3504/**
 *  s2io_set_swapper - to set the swapper control on the card
3506 *  @sp : private member of the device structure,
3507 *  pointer to the s2io_nic structure.
3508 *  Description: Function to set the swapper control on the card
3509 *  correctly depending on the 'endianness' of the system.
3510 *  Return value:
3511 *  SUCCESS on success and FAILURE on failure.
3512 */
3513
static int s2io_set_swapper(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Try each read-path swapper mode until feedback matches. */
		int i = 0;
		static const u64 value[] = {
			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
			0x8100008181000081ULL,	/* FE=1, SE=0 */
			0x4200004242000042ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
				  "feedback read %llx\n",
				  dev->name, (unsigned long long)val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now verify the write path using the xmsi_address register. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		/* Probe write-path swapper modes on top of the read mode. */
		int i = 0;
		static const u64 value[] = {
			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
			0x0081810000818100ULL,	/* FE=1, SE=0 */
			0x0042420000424200ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG,
				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep the top 16 bits; rebuild the rest per host endianness. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG,
			  "%s: Endian settings are wrong, feedback read %llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	return SUCCESS;
}
3645
3646static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3647{
3648        struct XENA_dev_config __iomem *bar0 = nic->bar0;
3649        u64 val64;
3650        int ret = 0, cnt = 0;
3651
3652        do {
3653                val64 = readq(&bar0->xmsi_access);
3654                if (!(val64 & s2BIT(15)))
3655                        break;
3656                mdelay(1);
3657                cnt++;
3658        } while (cnt < 5);
3659        if (cnt == 5) {
3660                DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3661                ret = 1;
3662        }
3663
3664        return ret;
3665}
3666
3667static void restore_xmsi_data(struct s2io_nic *nic)
3668{
3669        struct XENA_dev_config __iomem *bar0 = nic->bar0;
3670        u64 val64;
3671        int i, msix_index;
3672
3673        if (nic->device_type == XFRAME_I_DEVICE)
3674                return;
3675
3676        for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3677                msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3678                writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3679                writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3680                val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3681                writeq(val64, &bar0->xmsi_access);
3682                if (wait_for_msix_trans(nic, msix_index)) {
3683                        DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3684                                  __func__, msix_index);
3685                        continue;
3686                }
3687        }
3688}
3689
3690static void store_xmsi_data(struct s2io_nic *nic)
3691{
3692        struct XENA_dev_config __iomem *bar0 = nic->bar0;
3693        u64 val64, addr, data;
3694        int i, msix_index;
3695
3696        if (nic->device_type == XFRAME_I_DEVICE)
3697                return;
3698
3699        /* Store and display */
3700        for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3701                msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3702                val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3703                writeq(val64, &bar0->xmsi_access);
3704                if (wait_for_msix_trans(nic, msix_index)) {
3705                        DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3706                                  __func__, msix_index);
3707                        continue;
3708                }
3709                addr = readq(&bar0->xmsi_address);
3710                data = readq(&bar0->xmsi_data);
3711                if (addr && data) {
3712                        nic->msix_info[i].addr = addr;
3713                        nic->msix_info[i].data = data;
3714                }
3715        }
3716}
3717
/*
 * s2io_enable_msi_x - allocate vector tables and switch the NIC to MSI-X
 * @nic: device private structure
 *
 * Allocates the PCI msix_entry array and the driver's parallel
 * s2io_msix_entry bookkeeping array, assigns entry 0 to the alarm/tx-fifo
 * handler and entries 1..n to the rx rings, programs rx_mat so each ring
 * raises its own vector, and enables MSI-X demanding exactly
 * nic->num_entries vectors.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or if the vectors
 * could not be obtained (both arrays are freed and NULLed in that case).
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		kfree(nic->entries);
		swstats->mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Vector 0 services alarms and all tx fifos. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Ring vectors occupy hardware table entries 1, 9, 17, ... */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each rx ring's interrupt to its own MSI-X vector. */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat);	/* read back to flush the write */

	ret = pci_enable_msix_range(nic->pdev, nic->entries,
				    nic->num_entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret < 0) {
		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
		kfree(nic->entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct msix_entry);
		kfree(nic->s2io_entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct s2io_msix_entry);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3801
3802/* Handle software interrupt used during MSI(X) test */
3803static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3804{
3805        struct s2io_nic *sp = dev_id;
3806
3807        sp->msi_detected = 1;
3808        wake_up(&sp->msi_wait);
3809
3810        return IRQ_HANDLED;
3811}
3812
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Vector 1 (the first ring vector) carries the test interrupt. */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/* Arm a one-shot scheduled interrupt routed to MSI vector 1,
	 * preserving the original register value for restoration below.
	 */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to ~100ms to arrive. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n",
			  sp->dev->name, pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the scheduled-interrupt control register. */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3855
3856static void remove_msix_isr(struct s2io_nic *sp)
3857{
3858        int i;
3859        u16 msi_control;
3860
3861        for (i = 0; i < sp->num_entries; i++) {
3862                if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3863                        int vector = sp->entries[i].vector;
3864                        void *arg = sp->s2io_entries[i].arg;
3865                        free_irq(vector, arg);
3866                }
3867        }
3868
3869        kfree(sp->entries);
3870        kfree(sp->s2io_entries);
3871        sp->entries = NULL;
3872        sp->s2io_entries = NULL;
3873
3874        pci_read_config_word(sp->pdev, 0x42, &msi_control);
3875        msi_control &= 0xFFFE; /* Disable MSI */
3876        pci_write_config_word(sp->pdev, 0x42, msi_control);
3877
3878        pci_disable_msix(sp->pdev);
3879}
3880
/* Release the legacy INTA interrupt line. */
static void remove_inta_isr(struct s2io_nic *sp)
{
	free_irq(sp->pdev->irq, sp->dev);
}
3885
3886/* ********************************************************* *
3887 * Functions defined below concern the OS part of the driver *
3888 * ********************************************************* */
3889
3890/**
3891 *  s2io_open - open entry point of the driver
3892 *  @dev : pointer to the device structure.
3893 *  Description:
3894 *  This function is the open entry point of the driver. It mainly calls a
3895 *  function to allocate Rx buffers and inserts them into the buffer
3896 *  descriptors and then enables the Rx part of the NIC.
3897 *  Return value:
3898 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3899 *   file on failure.
3900 */
3901
3902static int s2io_open(struct net_device *dev)
3903{
3904        struct s2io_nic *sp = netdev_priv(dev);
3905        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3906        int err = 0;
3907
3908        /*
3909         * Make sure you have link off by default every time
3910         * Nic is initialized
3911         */
3912        netif_carrier_off(dev);
3913        sp->last_link_state = 0;
3914
3915        /* Initialize H/W and enable interrupts */
3916        err = s2io_card_up(sp);
3917        if (err) {
3918                DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3919                          dev->name);
3920                goto hw_init_failed;
3921        }
3922
3923        if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3924                DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3925                s2io_card_down(sp);
3926                err = -ENODEV;
3927                goto hw_init_failed;
3928        }
3929        s2io_start_all_tx_queue(sp);
3930        return 0;
3931
3932hw_init_failed:
3933        if (sp->config.intr_type == MSI_X) {
3934                if (sp->entries) {
3935                        kfree(sp->entries);
3936                        swstats->mem_freed += sp->num_entries *
3937                                sizeof(struct msix_entry);
3938                }
3939                if (sp->s2io_entries) {
3940                        kfree(sp->s2io_entries);
3941                        swstats->mem_freed += sp->num_entries *
3942                                sizeof(struct s2io_msix_entry);
3943                }
3944        }
3945        return err;
3946}
3947
3948/**
3949 *  s2io_close -close entry point of the driver
3950 *  @dev : device pointer.
3951 *  Description:
3952 *  This is the stop entry point of the driver. It needs to undo exactly
3953 *  whatever was done by the open entry point,thus it's usually referred to
3954 *  as the close function.Among other things this function mainly stops the
3955 *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3956 *  Return value:
3957 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3958 *  file on failure.
3959 */
3960
3961static int s2io_close(struct net_device *dev)
3962{
3963        struct s2io_nic *sp = netdev_priv(dev);
3964        struct config_param *config = &sp->config;
3965        u64 tmp64;
3966        int offset;
3967
3968        /* Return if the device is already closed               *
3969         *  Can happen when s2io_card_up failed in change_mtu    *
3970         */
3971        if (!is_s2io_card_up(sp))
3972                return 0;
3973
3974        s2io_stop_all_tx_queue(sp);
3975        /* delete all populated mac entries */
3976        for (offset = 1; offset < config->max_mc_addr; offset++) {
3977                tmp64 = do_s2io_read_unicast_mc(sp, offset);
3978                if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3979                        do_s2io_delete_unicast_mc(sp, tmp64);
3980        }
3981
3982        s2io_card_down(sp);
3983
3984        return 0;
3985}
3986
3987/**
3988 *  s2io_xmit - Tx entry point of the driver
3989 *  @skb : the socket buffer containing the Tx data.
3990 *  @dev : device pointer.
3991 *  Description :
3992 *  This function is the Tx entry point of the driver. S2IO NIC supports
3993 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
3994 *  NOTE: when device can't queue the pkt,just the trans_start variable will
3995 *  not be updated.
3996 *  Return value:
3997 *  0 on success & 1 on failure.
3998 */
3999
static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length skbs outright. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Card is being reset / brought down: silently consume the packet. */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	queue = 0;
	if (skb_vlan_tag_present(skb))
		vlan_tag = skb_vlan_tag_get(skb);
	/*
	 * Select the tx fifo.  Default steering hashes TCP/UDP port
	 * numbers into the corresponding fifo group; priority steering
	 * maps skb->priority through the configured fifo table.
	 */
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			/* Port-based steering only works on the first fragment. */
			if (!ip_is_fragment(ip)) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						       ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
			[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	spin_lock_irqsave(&fifo->tx_lock, flags);

	/* If the chosen queue is stopped, push back on the stack. */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16)fifo->tx_curr_put_info.offset;
	get_off = (u16)fifo->tx_curr_get_info.offset;
	txdp = fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	/* Fill in LSO / checksum-offload control bits for this descriptor. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
				    TXD_TX_CKO_TCP_EN |
				    TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* DMA-map the linear part of the skb into the first buffer. */
	frg_len = skb_headlen(skb);
	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
					      frg_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
		goto pci_map_failed;

	/* Host_Control keeps the skb pointer for completion handling. */
	txdp->Host_Control = (unsigned long)skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!skb_frag_size(frag))
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
							     frag, 0,
							     skb_frag_size(frag),
							     DMA_TO_DEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	/* Hand the descriptor list to the hardware fifo. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	mmiowb();

	/* Advance the put pointer, wrapping at the end of the ring. */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		swstats->fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	swstats->mem_allocated += skb->truesize;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	/* With MSI-X, reap completions inline to keep latency low. */
	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return NETDEV_TX_OK;

pci_map_failed:
	/* DMA mapping failed: count it, stop the queue and drop the skb. */
	swstats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return NETDEV_TX_OK;
}
4188
4189static void
4190s2io_alarm_handle(struct timer_list *t)
4191{
4192        struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4193        struct net_device *dev = sp->dev;
4194
4195        s2io_handle_errors(dev);
4196        mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4197}
4198
/*
 * s2io_msix_ring_handle - per-rx-ring MSI-X interrupt handler
 * @irq: vector number (unused)
 * @dev_id: the ring_info this vector was registered for
 *
 * In NAPI mode, masks this ring's vector and schedules the ring's NAPI
 * poll; otherwise processes received frames and replenishes rx buffers
 * inline.  Always returns IRQ_HANDLED.
 */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/*
		 * Mask this ring's vector (byte-addressed within
		 * xmsi_mask_reg) until the NAPI poll re-enables it.
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		val8 = readb(addr);	/* read back to flush the write */
		napi_schedule(&ring->napi);
	} else {
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(sp, ring);
	}

	return IRQ_HANDLED;
}
4225
/*
 * s2io_msix_fifo_handle - MSI-X vector 0 handler (alarms + all tx fifos)
 * @irq: vector number (unused)
 * @dev_id: pointer to the fifo_info array for this device
 *
 * Handles TXPIC alarm interrupts and tx completion for every fifo.
 * Returns IRQ_NONE if the card is down or the interrupt was not ours.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config  = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask all interrupts while we service this one. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		/* Reap tx completions on every fifo served by this vector. */
		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* Restore the saved interrupt mask and flush the write. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
4262
/*
 * s2io_txpic_intr_handle - service TXPIC/GPIO link-state interrupts
 * @sp: device private structure
 *
 * Decodes GPIO link up/down indications, updates the link state via
 * s2io_link(), drives the adapter enable/LED bits, and flips the GPIO
 * interrupt masks so only the *opposite* transition stays unmasked.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Final read flushes the mask updates to the device. */
	val64 = readq(&bar0->gpio_int_mask);
}
4322
4323/**
4324 *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4325 *  @value: alarm bits
4326 *  @addr: address value
4327 *  @cnt: counter variable
4328 *  Description: Check for alarm and increment the counter
4329 *  Return Value:
4330 *  1 - if alarm bit set
4331 *  0 - if alarm bit is not set
4332 */
4333static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4334                                 unsigned long long *cnt)
4335{
4336        u64 val64;
4337        val64 = readq(addr);
4338        if (val64 & value) {
4339                writeq(val64, addr);
4340                (*cnt)++;
4341                return 1;
4342        }
4343        return 0;
4344
4345}
4346
4347/**
4348 *  s2io_handle_errors - Xframe error indication handler
4349 *  @nic: device private variable
4350 *  Description: Handle alarms such as loss of link, single or
4351 *  double ECC errors, critical and serious errors.
4352 *  Return Value:
4353 *  NONE
4354 */
4355static void s2io_handle_errors(void *dev_id)
4356{
4357        struct net_device *dev = (struct net_device *)dev_id;
4358        struct s2io_nic *sp = netdev_priv(dev);
4359        struct XENA_dev_config __iomem *bar0 = sp->bar0;
4360        u64 temp64 = 0, val64 = 0;
4361        int i = 0;
4362
4363        struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4364        struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4365
4366        if (!is_s2io_card_up(sp))
4367                return;
4368
4369        if (pci_channel_offline(sp->pdev))
4370                return;
4371
4372        memset(&sw_stat->ring_full_cnt, 0,
4373               sizeof(sw_stat->ring_full_cnt));
4374
4375        /* Handling the XPAK counters update */
4376        if (stats->xpak_timer_count < 72000) {
4377                /* waiting for an hour */
4378                stats->xpak_timer_count++;
4379        } else {
4380                s2io_updt_xpak_counter(dev);
4381                /* reset the count to zero */
4382                stats->xpak_timer_count = 0;
4383        }
4384
4385        /* Handling link status change error Intr */
4386        if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4387                val64 = readq(&bar0->mac_rmac_err_reg);
4388                writeq(val64, &bar0->mac_rmac_err_reg);
4389                if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4390                        schedule_work(&sp->set_link_task);
4391        }
4392
4393        /* In case of a serious error, the device will be Reset. */
4394        if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4395                                  &sw_stat->serious_err_cnt))
4396                goto reset;
4397
4398        /* Check for data parity error */
4399        if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4400                                  &sw_stat->parity_err_cnt))
4401                goto reset;
4402
4403        /* Check for ring full counter */
4404        if (sp->device_type == XFRAME_II_DEVICE) {
4405                val64 = readq(&bar0->ring_bump_counter1);
4406                for (i = 0; i < 4; i++) {
4407                        temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4408                        temp64 >>= 64 - ((i+1)*16);
4409                        sw_stat->ring_full_cnt[i] += temp64;
4410                }
4411
4412                val64 = readq(&bar0->ring_bump_counter2);
4413                for (i = 0; i < 4; i++) {
4414                        temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4415                        temp64 >>= 64 - ((i+1)*16);
4416                        sw_stat->ring_full_cnt[i+4] += temp64;
4417                }
4418        }
4419
4420        val64 = readq(&bar0->txdma_int_status);
4421        /*check for pfc_err*/
4422        if (val64 & TXDMA_PFC_INT) {
4423                if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4424                                          PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4425                                          PFC_PCIX_ERR,
4426                                          &bar0->pfc_err_reg,
4427                                          &sw_stat->pfc_err_cnt))
4428                        goto reset;
4429                do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4430                                      &bar0->pfc_err_reg,
4431                                      &sw_stat->pfc_err_cnt);
4432        }
4433
4434        /*check for tda_err*/
4435        if (val64 & TXDMA_TDA_INT) {
4436                if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4437                                          TDA_SM0_ERR_ALARM |
4438                                          TDA_SM1_ERR_ALARM,
4439                                          &bar0->tda_err_reg,
4440                                          &sw_stat->tda_err_cnt))
4441                        goto reset;
4442                do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4443                                      &bar0->tda_err_reg,
4444                                      &sw_stat->tda_err_cnt);
4445        }
4446        /*check for pcc_err*/
4447        if (val64 & TXDMA_PCC_INT) {
4448                if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4449                                          PCC_N_SERR | PCC_6_COF_OV_ERR |
4450                                          PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4451                                          PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4452                                          PCC_TXB_ECC_DB_ERR,
4453                                          &bar0->pcc_err_reg,
4454                                          &sw_stat->pcc_err_cnt))
4455                        goto reset;
4456                do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4457                                      &bar0->pcc_err_reg,
4458                                      &sw_stat->pcc_err_cnt);
4459        }
4460
4461        /*check for tti_err*/
4462        if (val64 & TXDMA_TTI_INT) {
4463                if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4464                                          &bar0->tti_err_reg,
4465                                          &sw_stat->tti_err_cnt))
4466                        goto reset;
4467                do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4468                                      &bar0->tti_err_reg,
4469                                      &sw_stat->tti_err_cnt);
4470        }
4471
4472        /*check for lso_err*/
4473        if (val64 & TXDMA_LSO_INT) {
4474                if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4475                                          LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4476                                          &bar0->lso_err_reg,
4477                                          &sw_stat->lso_err_cnt))
4478                        goto reset;
4479                do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4480                                      &bar0->lso_err_reg,
4481                                      &sw_stat->lso_err_cnt);
4482        }
4483
4484        /*check for tpa_err*/
4485        if (val64 & TXDMA_TPA_INT) {
4486                if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4487                                          &bar0->tpa_err_reg,
4488                                          &sw_stat->tpa_err_cnt))
4489                        goto reset;
4490                do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4491                                      &bar0->tpa_err_reg,
4492                                      &sw_stat->tpa_err_cnt);
4493        }
4494
4495        /*check for sm_err*/
4496        if (val64 & TXDMA_SM_INT) {
4497                if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4498                                          &bar0->sm_err_reg,
4499                                          &sw_stat->sm_err_cnt))
4500                        goto reset;
4501        }
4502
4503        val64 = readq(&bar0->mac_int_status);
4504        if (val64 & MAC_INT_STATUS_TMAC_INT) {
4505                if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4506                                          &bar0->mac_tmac_err_reg,
4507                                          &sw_stat->mac_tmac_err_cnt))
4508                        goto reset;
4509                do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4510                                      TMAC_DESC_ECC_SG_ERR |
4511                                      TMAC_DESC_ECC_DB_ERR,
4512                                      &bar0->mac_tmac_err_reg,
4513                                      &sw_stat->mac_tmac_err_cnt);
4514        }
4515
4516        val64 = readq(&bar0->xgxs_int_status);
4517        if (val64 & XGXS_INT_STATUS_TXGXS) {
4518                if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4519                                          &bar0->xgxs_txgxs_err_reg,
4520                                          &sw_stat->xgxs_txgxs_err_cnt))
4521                        goto reset;
4522                do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4523                                      &bar0->xgxs_txgxs_err_reg,
4524                                      &sw_stat->xgxs_txgxs_err_cnt);
4525        }
4526
4527        val64 = readq(&bar0->rxdma_int_status);
4528        if (val64 & RXDMA_INT_RC_INT_M) {
4529                if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4530                                          RC_FTC_ECC_DB_ERR |
4531                                          RC_PRCn_SM_ERR_ALARM |
4532                                          RC_FTC_SM_ERR_ALARM,
4533                                          &bar0->rc_err_reg,
4534                                          &sw_stat->rc_err_cnt))
4535                        goto reset;
4536                do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4537                                      RC_FTC_ECC_SG_ERR |
4538                                      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4539                                      &sw_stat->rc_err_cnt);
4540                if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4541                                          PRC_PCI_AB_WR_Rn |
4542                                          PRC_PCI_AB_F_WR_Rn,
4543                                          &bar0->prc_pcix_err_reg,
4544                                          &sw_stat->prc_pcix_err_cnt))
4545                        goto reset;
4546                do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4547                                      PRC_PCI_DP_WR_Rn |
4548                                      PRC_PCI_DP_F_WR_Rn,
4549                                      &bar0->prc_pcix_err_reg,
4550                                      &sw_stat->prc_pcix_err_cnt);
4551        }
4552
4553        if (val64 & RXDMA_INT_RPA_INT_M) {
4554                if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4555                                          &bar0->rpa_err_reg,
4556                                          &sw_stat->rpa_err_cnt))
4557                        goto reset;
4558                do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4559                                      &bar0->rpa_err_reg,
4560                                      &sw_stat->rpa_err_cnt);
4561        }
4562
4563        if (val64 & RXDMA_INT_RDA_INT_M) {
4564                if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4565                                          RDA_FRM_ECC_DB_N_AERR |
4566                                          RDA_SM1_ERR_ALARM |
4567                                          RDA_SM0_ERR_ALARM |
4568                                          RDA_RXD_ECC_DB_SERR,
4569                                          &bar0->rda_err_reg,
4570                                          &sw_stat->rda_err_cnt))
4571                        goto reset;
4572                do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4573                                      RDA_FRM_ECC_SG_ERR |
4574                                      RDA_MISC_ERR |
4575                                      RDA_PCIX_ERR,
4576                                      &bar0->rda_err_reg,
4577                                      &sw_stat->rda_err_cnt);
4578        }
4579
4580        if (val64 & RXDMA_INT_RTI_INT_M) {
4581                if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4582                                          &bar0->rti_err_reg,
4583                                          &sw_stat->rti_err_cnt))
4584                        goto reset;
4585                do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4586                                      &bar0->rti_err_reg,
4587                                      &sw_stat->rti_err_cnt);
4588        }
4589
4590        val64 = readq(&bar0->mac_int_status);
4591        if (val64 & MAC_INT_STATUS_RMAC_INT) {
4592                if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4593                                          &bar0->mac_rmac_err_reg,
4594                                          &sw_stat->mac_rmac_err_cnt))
4595                        goto reset;
4596                do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4597                                      RMAC_SINGLE_ECC_ERR |
4598                                      RMAC_DOUBLE_ECC_ERR,
4599                                      &bar0->mac_rmac_err_reg,
4600                                      &sw_stat->mac_rmac_err_cnt);
4601        }
4602
4603        val64 = readq(&bar0->xgxs_int_status);
4604        if (val64 & XGXS_INT_STATUS_RXGXS) {
4605                if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4606                                          &bar0->xgxs_rxgxs_err_reg,
4607                                          &sw_stat->xgxs_rxgxs_err_cnt))
4608                        goto reset;
4609        }
4610
4611        val64 = readq(&bar0->mc_int_status);
4612        if (val64 & MC_INT_STATUS_MC_INT) {
4613                if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4614                                          &bar0->mc_err_reg,
4615                                          &sw_stat->mc_err_cnt))
4616                        goto reset;
4617
4618                /* Handling Ecc errors */
4619                if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4620                        writeq(val64, &bar0->mc_err_reg);
4621                        if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4622                                sw_stat->double_ecc_errs++;
4623                                if (sp->device_type != XFRAME_II_DEVICE) {
4624                                        /*
4625                                         * Reset XframeI only if critical error
4626                                         */
4627                                        if (val64 &
4628                                            (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4629                                             MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4630                                                goto reset;
4631                                }
4632                        } else
4633                                sw_stat->single_ecc_errs++;
4634                }
4635        }
4636        return;
4637
4638reset:
4639        s2io_stop_all_tx_queue(sp);
4640        schedule_work(&sp->rst_timer_task);
4641        sw_stat->soft_reset_cnt++;
4642}
4643
4644/**
4645 *  s2io_isr - ISR handler of the device .
4646 *  @irq: the irq of the device.
4647 *  @dev_id: a void pointer to the dev structure of the NIC.
4648 *  Description:  This function is the ISR handler of the device. It
4649 *  identifies the reason for the interrupt and calls the relevant
4650 *  service routines. As a contongency measure, this ISR allocates the
4651 *  recv buffers, if their numbers are below the panic value which is
4652 *  presently set to 25% of the original number of rcv buffers allocated.
4653 *  Return value:
4654 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4655 *   IRQ_NONE: will be returned if interrupt is not from our device
4656 */
4657static irqreturn_t s2io_isr(int irq, void *dev_id)
4658{
4659        struct net_device *dev = (struct net_device *)dev_id;
4660        struct s2io_nic *sp = netdev_priv(dev);
4661        struct XENA_dev_config __iomem *bar0 = sp->bar0;
4662        int i;
4663        u64 reason = 0;
4664        struct mac_info *mac_control;
4665        struct config_param *config;
4666
4667        /* Pretend we handled any irq's from a disconnected card */
4668        if (pci_channel_offline(sp->pdev))
4669                return IRQ_NONE;
4670
4671        if (!is_s2io_card_up(sp))
4672                return IRQ_NONE;
4673
4674        config = &sp->config;
4675        mac_control = &sp->mac_control;
4676
4677        /*
4678         * Identify the cause for interrupt and call the appropriate
4679         * interrupt handler. Causes for the interrupt could be;
4680         * 1. Rx of packet.
4681         * 2. Tx complete.
4682         * 3. Link down.
4683         */
4684        reason = readq(&bar0->general_int_status);
4685
4686        if (unlikely(reason == S2IO_MINUS_ONE))
4687                return IRQ_HANDLED;     /* Nothing much can be done. Get out */
4688
4689        if (reason &
4690            (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4691                writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4692
4693                if (config->napi) {
4694                        if (reason & GEN_INTR_RXTRAFFIC) {
4695                                napi_schedule(&sp->napi);
4696                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4697                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4698                                readl(&bar0->rx_traffic_int);
4699                        }
4700                } else {
4701                        /*
4702                         * rx_traffic_int reg is an R1 register, writing all 1's
4703                         * will ensure that the actual interrupt causing bit
4704                         * get's cleared and hence a read can be avoided.
4705                         */
4706                        if (reason & GEN_INTR_RXTRAFFIC)
4707                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4708
4709                        for (i = 0; i < config->rx_ring_num; i++) {
4710                                struct ring_info *ring = &mac_control->rings[i];
4711
4712                                rx_intr_handler(ring, 0);
4713                        }
4714                }
4715
4716                /*
4717                 * tx_traffic_int reg is an R1 register, writing all 1's
4718                 * will ensure that the actual interrupt causing bit get's
4719                 * cleared and hence a read can be avoided.
4720                 */
4721                if (reason & GEN_INTR_TXTRAFFIC)
4722                        writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4723
4724                for (i = 0; i < config->tx_fifo_num; i++)
4725                        tx_intr_handler(&mac_control->fifos[i]);
4726
4727                if (reason & GEN_INTR_TXPIC)
4728                        s2io_txpic_intr_handle(sp);
4729
4730                /*
4731                 * Reallocate the buffers from the interrupt handler itself.
4732                 */
4733                if (!config->napi) {
4734                        for (i = 0; i < config->rx_ring_num; i++) {
4735                                struct ring_info *ring = &mac_control->rings[i];
4736
4737                                s2io_chk_rx_buffers(sp, ring);
4738                        }
4739                }
4740                writeq(sp->general_int_mask, &bar0->general_int_mask);
4741                readl(&bar0->general_int_status);
4742
4743                return IRQ_HANDLED;
4744
4745        } else if (!reason) {
4746                /* The interrupt was not raised by us */
4747                return IRQ_NONE;
4748        }
4749
4750        return IRQ_HANDLED;
4751}
4752
4753/**
4754 * s2io_updt_stats -
4755 */
4756static void s2io_updt_stats(struct s2io_nic *sp)
4757{
4758        struct XENA_dev_config __iomem *bar0 = sp->bar0;
4759        u64 val64;
4760        int cnt = 0;
4761
4762        if (is_s2io_card_up(sp)) {
4763                /* Apprx 30us on a 133 MHz bus */
4764                val64 = SET_UPDT_CLICKS(10) |
4765                        STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4766                writeq(val64, &bar0->stat_cfg);
4767                do {
4768                        udelay(100);
4769                        val64 = readq(&bar0->stat_cfg);
4770                        if (!(val64 & s2BIT(0)))
4771                                break;
4772                        cnt++;
4773                        if (cnt == 5)
4774                                break; /* Updt failed */
4775                } while (1);
4776        }
4777}
4778
4779/**
4780 *  s2io_get_stats - Updates the device statistics structure.
4781 *  @dev : pointer to the device structure.
4782 *  Description:
4783 *  This function updates the device statistics structure in the s2io_nic
4784 *  structure and returns a pointer to the same.
4785 *  Return value:
4786 *  pointer to the updated net_device_stats structure.
4787 */
4788static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4789{
4790        struct s2io_nic *sp = netdev_priv(dev);
4791        struct mac_info *mac_control = &sp->mac_control;
4792        struct stat_block *stats = mac_control->stats_info;
4793        u64 delta;
4794
4795        /* Configure Stats for immediate updt */
4796        s2io_updt_stats(sp);
4797
4798        /* A device reset will cause the on-adapter statistics to be zero'ed.
4799         * This can be done while running by changing the MTU.  To prevent the
4800         * system from having the stats zero'ed, the driver keeps a copy of the
4801         * last update to the system (which is also zero'ed on reset).  This
4802         * enables the driver to accurately know the delta between the last
4803         * update and the current update.
4804         */
4805        delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4806                le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4807        sp->stats.rx_packets += delta;
4808        dev->stats.rx_packets += delta;
4809
4810        delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4811                le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4812        sp->stats.tx_packets += delta;
4813        dev->stats.tx_packets += delta;
4814
4815        delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4816                le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4817        sp->stats.rx_bytes += delta;
4818        dev->stats.rx_bytes += delta;
4819
4820        delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4821                le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4822        sp->stats.tx_bytes += delta;
4823        dev->stats.tx_bytes += delta;
4824
4825        delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4826        sp->stats.rx_errors += delta;
4827        dev->stats.rx_errors += delta;
4828
4829        delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4830                le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4831        sp->stats.tx_errors += delta;
4832        dev->stats.tx_errors += delta;
4833
4834        delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4835        sp->stats.rx_dropped += delta;
4836        dev->stats.rx_dropped += delta;
4837
4838        delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4839        sp->stats.tx_dropped += delta;
4840        dev->stats.tx_dropped += delta;
4841
4842        /* The adapter MAC interprets pause frames as multicast packets, but
4843         * does not pass them up.  This erroneously increases the multicast
4844         * packet count and needs to be deducted when the multicast frame count
4845         * is queried.
4846         */
4847        delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4848                le32_to_cpu(stats->rmac_vld_mcst_frms);
4849        delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4850        delta -= sp->stats.multicast;
4851        sp->stats.multicast += delta;
4852        dev->stats.multicast += delta;
4853
4854        delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4855                le32_to_cpu(stats->rmac_usized_frms)) +
4856                le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4857        sp->stats.rx_length_errors += delta;
4858        dev->stats.rx_length_errors += delta;
4859
4860        delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4861        sp->stats.rx_crc_errors += delta;
4862        dev->stats.rx_crc_errors += delta;
4863
4864        return &dev->stats;
4865}
4866
4867/**
4868 *  s2io_set_multicast - entry point for multicast address enable/disable.
4869 *  @dev : pointer to the device structure
4870 *  Description:
4871 *  This function is a driver entry point which gets called by the kernel
4872 *  whenever multicast addresses must be enabled/disabled. This also gets
4873 *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4874 *  determine, if multicast address must be enabled or if promiscuous mode
4875 *  is to be disabled etc.
4876 *  Return value:
4877 *  void.
4878 */
4879
4880static void s2io_set_multicast(struct net_device *dev)
4881{
4882        int i, j, prev_cnt;
4883        struct netdev_hw_addr *ha;
4884        struct s2io_nic *sp = netdev_priv(dev);
4885        struct XENA_dev_config __iomem *bar0 = sp->bar0;
4886        u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4887                0xfeffffffffffULL;
4888        u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4889        void __iomem *add;
4890        struct config_param *config = &sp->config;
4891
4892        if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4893                /*  Enable all Multicast addresses */
4894                writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4895                       &bar0->rmac_addr_data0_mem);
4896                writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4897                       &bar0->rmac_addr_data1_mem);
4898                val64 = RMAC_ADDR_CMD_MEM_WE |
4899                        RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4900                        RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4901                writeq(val64, &bar0->rmac_addr_cmd_mem);
4902                /* Wait till command completes */
4903                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4904                                      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4905                                      S2IO_BIT_RESET);
4906
4907                sp->m_cast_flg = 1;
4908                sp->all_multi_pos = config->max_mc_addr - 1;
4909        } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4910                /*  Disable all Multicast addresses */
4911                writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4912                       &bar0->rmac_addr_data0_mem);
4913                writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4914                       &bar0->rmac_addr_data1_mem);
4915                val64 = RMAC_ADDR_CMD_MEM_WE |
4916                        RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4917                        RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4918                writeq(val64, &bar0->rmac_addr_cmd_mem);
4919                /* Wait till command completes */
4920                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4921                                      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4922                                      S2IO_BIT_RESET);
4923
4924                sp->m_cast_flg = 0;
4925                sp->all_multi_pos = 0;
4926        }
4927
4928        if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4929                /*  Put the NIC into promiscuous mode */
4930                add = &bar0->mac_cfg;
4931                val64 = readq(&bar0->mac_cfg);
4932                val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4933
4934                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4935                writel((u32)val64, add);
4936                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4937                writel((u32) (val64 >> 32), (add + 4));
4938
4939                if (vlan_tag_strip != 1) {
4940                        val64 = readq(&bar0->rx_pa_cfg);
4941                        val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4942                        writeq(val64, &bar0->rx_pa_cfg);
4943                        sp->vlan_strip_flag = 0;
4944                }
4945
4946                val64 = readq(&bar0->mac_cfg);
4947                sp->promisc_flg = 1;
4948                DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4949                          dev->name);
4950        } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4951                /*  Remove the NIC from promiscuous mode */
4952                add = &bar0->mac_cfg;
4953                val64 = readq(&bar0->mac_cfg);
4954                val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4955
4956                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4957                writel((u32)val64, add);
4958                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4959                writel((u32) (val64 >> 32), (add + 4));
4960
4961                if (vlan_tag_strip != 0) {
4962                        val64 = readq(&bar0->rx_pa_cfg);
4963                        val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4964                        writeq(val64, &bar0->rx_pa_cfg);
4965                        sp->vlan_strip_flag = 1;
4966                }
4967
4968                val64 = readq(&bar0->mac_cfg);
4969                sp->promisc_flg = 0;
4970                DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4971        }
4972
4973        /*  Update individual M_CAST address list */
4974        if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4975                if (netdev_mc_count(dev) >
4976                    (config->max_mc_addr - config->max_mac_addr)) {
4977                        DBG_PRINT(ERR_DBG,
4978                                  "%s: No more Rx filters can be added - "
4979                                  "please enable ALL_MULTI instead\n",
4980                                  dev->name);
4981                        return;
4982                }
4983
4984                prev_cnt = sp->mc_addr_count;
4985                sp->mc_addr_count = netdev_mc_count(dev);
4986
4987                /* Clear out the previous list of Mc in the H/W. */
4988                for (i = 0; i < prev_cnt; i++) {
4989                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4990                               &bar0->rmac_addr_data0_mem);
4991                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4992                               &bar0->rmac_addr_data1_mem);
4993                        val64 = RMAC_ADDR_CMD_MEM_WE |
4994                                RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4995                                RMAC_ADDR_CMD_MEM_OFFSET
4996                                (config->mc_start_offset + i);
4997                        writeq(val64, &bar0->rmac_addr_cmd_mem);
4998
4999                        /* Wait for command completes */
5000                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5001                                                  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5002                                                  S2IO_BIT_RESET)) {
5003                                DBG_PRINT(ERR_DBG,
5004                                          "%s: Adding Multicasts failed\n",
5005                                          dev->name);
5006                                return;
5007                        }
5008                }
5009
5010                /* Create the new Rx filter list and update the same in H/W. */
5011                i = 0;
5012                netdev_for_each_mc_addr(ha, dev) {
5013                        mac_addr = 0;
5014                        for (j = 0; j < ETH_ALEN; j++) {
5015                                mac_addr |= ha->addr[j];
5016                                mac_addr <<= 8;
5017                        }
5018                        mac_addr >>= 8;
5019                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5020                               &bar0->rmac_addr_data0_mem);
5021                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5022                               &bar0->rmac_addr_data1_mem);
5023                        val64 = RMAC_ADDR_CMD_MEM_WE |
5024                                RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5025                                RMAC_ADDR_CMD_MEM_OFFSET
5026                                (i + config->mc_start_offset);
5027                        writeq(val64, &bar0->rmac_addr_cmd_mem);
5028
5029                        /* Wait for command completes */
5030                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5031                                                  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5032                                                  S2IO_BIT_RESET)) {
5033                                DBG_PRINT(ERR_DBG,
5034                                          "%s: Adding Multicasts failed\n",
5035                                          dev->name);
5036                                return;
5037                        }
5038                        i++;
5039                }
5040        }
5041}
5042
5043/* read from CAM unicast & multicast addresses and store it in
5044 * def_mac_addr structure
5045 */
5046static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5047{
5048        int offset;
5049        u64 mac_addr = 0x0;
5050        struct config_param *config = &sp->config;
5051
5052        /* store unicast & multicast mac addresses */
5053        for (offset = 0; offset < config->max_mc_addr; offset++) {
5054                mac_addr = do_s2io_read_unicast_mc(sp, offset);
5055                /* if read fails disable the entry */
5056                if (mac_addr == FAILURE)
5057                        mac_addr = S2IO_DISABLE_MAC_ENTRY;
5058                do_s2io_copy_mac_addr(sp, offset, mac_addr);
5059        }
5060}
5061
5062/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5063static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5064{
5065        int offset;
5066        struct config_param *config = &sp->config;
5067        /* restore unicast mac address */
5068        for (offset = 0; offset < config->max_mac_addr; offset++)
5069                do_s2io_prog_unicast(sp->dev,
5070                                     sp->def_mac_addr[offset].mac_addr);
5071
5072        /* restore multicast mac address */
5073        for (offset = config->mc_start_offset;
5074             offset < config->max_mc_addr; offset++)
5075                do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5076}
5077
5078/* add a multicast MAC address to CAM */
5079static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5080{
5081        int i;
5082        u64 mac_addr = 0;
5083        struct config_param *config = &sp->config;
5084
5085        for (i = 0; i < ETH_ALEN; i++) {
5086                mac_addr <<= 8;
5087                mac_addr |= addr[i];
5088        }
5089        if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5090                return SUCCESS;
5091
5092        /* check if the multicast mac already preset in CAM */
5093        for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5094                u64 tmp64;
5095                tmp64 = do_s2io_read_unicast_mc(sp, i);
5096                if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5097                        break;
5098
5099                if (tmp64 == mac_addr)
5100                        return SUCCESS;
5101        }
5102        if (i == config->max_mc_addr) {
5103                DBG_PRINT(ERR_DBG,
5104                          "CAM full no space left for multicast MAC\n");
5105                return FAILURE;
5106        }
5107        /* Update the internal structure with this new mac address */
5108        do_s2io_copy_mac_addr(sp, i, mac_addr);
5109
5110        return do_s2io_add_mac(sp, mac_addr, i);
5111}
5112
5113/* add MAC address to CAM */
5114static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5115{
5116        u64 val64;
5117        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5118
5119        writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5120               &bar0->rmac_addr_data0_mem);
5121
5122        val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5123                RMAC_ADDR_CMD_MEM_OFFSET(off);
5124        writeq(val64, &bar0->rmac_addr_cmd_mem);
5125
5126        /* Wait till command completes */
5127        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5128                                  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5129                                  S2IO_BIT_RESET)) {
5130                DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5131                return FAILURE;
5132        }
5133        return SUCCESS;
5134}
5135/* deletes a specified unicast/multicast mac entry from CAM */
5136static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5137{
5138        int offset;
5139        u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5140        struct config_param *config = &sp->config;
5141
5142        for (offset = 1;
5143             offset < config->max_mc_addr; offset++) {
5144                tmp64 = do_s2io_read_unicast_mc(sp, offset);
5145                if (tmp64 == addr) {
5146                        /* disable the entry by writing  0xffffffffffffULL */
5147                        if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5148                                return FAILURE;
5149                        /* store the new mac list from CAM */
5150                        do_s2io_store_unicast_mc(sp);
5151                        return SUCCESS;
5152                }
5153        }
5154        DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5155                  (unsigned long long)addr);
5156        return FAILURE;
5157}
5158
/* Read one MAC entry from the RMAC CAM at 'offset'.
 * Returns the 48-bit address, or FAILURE if the read command times out.
 */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	/* NOTE(review): the initializer is dead — tmp64 is always
	 * overwritten by readq() before use on the success path. */
	u64 tmp64 = 0xffffffffffff0000ULL, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read mac addr */
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		/* NOTE(review): FAILURE is returned through a u64 here;
		 * callers compare the result against 48-bit addresses and
		 * S2IO_DISABLE_MAC_ENTRY, so confirm the sentinel cannot
		 * collide with a valid entry. */
		return FAILURE;
	}
	/* The address occupies the upper 48 bits of the data register. */
	tmp64 = readq(&bar0->rmac_addr_data0_mem);

	return tmp64 >> 16;
}
5181
5182/**
5183 * s2io_set_mac_addr - driver entry point
5184 */
5185
5186static int s2io_set_mac_addr(struct net_device *dev, void *p)
5187{
5188        struct sockaddr *addr = p;
5189
5190        if (!is_valid_ether_addr(addr->sa_data))
5191                return -EADDRNOTAVAIL;
5192
5193        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5194
5195        /* store the MAC address in CAM */
5196        return do_s2io_prog_unicast(dev, dev->dev_addr);
5197}
5198/**
5199 *  do_s2io_prog_unicast - Programs the Xframe mac address
5200 *  @dev : pointer to the device structure.
5201 *  @addr: a uchar pointer to the new mac address which is to be set.
5202 *  Description : This procedure will program the Xframe to receive
5203 *  frames with new Mac Address
5204 *  Return value: SUCCESS on success and an appropriate (-)ve integer
5205 *  as defined in errno.h file on failure.
5206 */
5207
5208static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5209{
5210        struct s2io_nic *sp = netdev_priv(dev);
5211        register u64 mac_addr = 0, perm_addr = 0;
5212        int i;
5213        u64 tmp64;
5214        struct config_param *config = &sp->config;
5215
5216        /*
5217         * Set the new MAC address as the new unicast filter and reflect this
5218         * change on the device address registered with the OS. It will be
5219         * at offset 0.
5220         */
5221        for (i = 0; i < ETH_ALEN; i++) {
5222                mac_addr <<= 8;
5223                mac_addr |= addr[i];
5224                perm_addr <<= 8;
5225                perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5226        }
5227
5228        /* check if the dev_addr is different than perm_addr */
5229        if (mac_addr == perm_addr)
5230                return SUCCESS;
5231
5232        /* check if the mac already preset in CAM */
5233        for (i = 1; i < config->max_mac_addr; i++) {
5234                tmp64 = do_s2io_read_unicast_mc(sp, i);
5235                if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5236                        break;
5237
5238                if (tmp64 == mac_addr) {
5239                        DBG_PRINT(INFO_DBG,
5240                                  "MAC addr:0x%llx already present in CAM\n",
5241                                  (unsigned long long)mac_addr);
5242                        return SUCCESS;
5243                }
5244        }
5245        if (i == config->max_mac_addr) {
5246                DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5247                return FAILURE;
5248        }
5249        /* Update the internal structure with this new mac address */
5250        do_s2io_copy_mac_addr(sp, i, mac_addr);
5251
5252        return do_s2io_add_mac(sp, mac_addr, i);
5253}
5254
5255/**
5256 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5257 * @sp : private member of the device structure, which is a pointer to the
5258 * s2io_nic structure.
5259 * @cmd: pointer to the structure with parameters given by ethtool to set
5260 * link information.
5261 * Description:
5262 * The function sets different link parameters provided by the user onto
5263 * the NIC.
5264 * Return value:
5265 * 0 on success.
5266 */
5267
5268static int
5269s2io_ethtool_set_link_ksettings(struct net_device *dev,
5270                                const struct ethtool_link_ksettings *cmd)
5271{
5272        struct s2io_nic *sp = netdev_priv(dev);
5273        if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5274            (cmd->base.speed != SPEED_10000) ||
5275            (cmd->base.duplex != DUPLEX_FULL))
5276                return -EINVAL;
5277        else {
5278                s2io_close(sp->dev);
5279                s2io_open(sp->dev);
5280        }
5281
5282        return 0;
5283}
5284
5285/**
5286 * s2io_ethtol_get_link_ksettings - Return link specific information.
5287 * @sp : private member of the device structure, pointer to the
5288 *      s2io_nic structure.
5289 * @cmd : pointer to the structure with parameters given by ethtool
5290 * to return link information.
5291 * Description:
5292 * Returns link specific information like speed, duplex etc.. to ethtool.
5293 * Return value :
5294 * return 0 on success.
5295 */
5296
5297static int
5298s2io_ethtool_get_link_ksettings(struct net_device *dev,
5299                                struct ethtool_link_ksettings *cmd)
5300{
5301        struct s2io_nic *sp = netdev_priv(dev);
5302
5303        ethtool_link_ksettings_zero_link_mode(cmd, supported);
5304        ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5305        ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5306
5307        ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5308        ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5309        ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5310
5311        cmd->base.port = PORT_FIBRE;
5312
5313        if (netif_carrier_ok(sp->dev)) {
5314                cmd->base.speed = SPEED_10000;
5315                cmd->base.duplex = DUPLEX_FULL;
5316        } else {
5317                cmd->base.speed = SPEED_UNKNOWN;
5318                cmd->base.duplex = DUPLEX_UNKNOWN;
5319        }
5320
5321        cmd->base.autoneg = AUTONEG_DISABLE;
5322        return 0;
5323}
5324
5325/**
5326 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5327 * @sp : private member of the device structure, which is a pointer to the
5328 * s2io_nic structure.
5329 * @info : pointer to the structure with parameters given by ethtool to
5330 * return driver information.
5331 * Description:
5332 * Returns driver specefic information like name, version etc.. to ethtool.
5333 * Return value:
5334 *  void
5335 */
5336
5337static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5338                                  struct ethtool_drvinfo *info)
5339{
5340        struct s2io_nic *sp = netdev_priv(dev);
5341
5342        strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5343        strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5344        strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5345}
5346
5347/**
5348 *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5349 *  @sp: private member of the device structure, which is a pointer to the
5350 *  s2io_nic structure.
5351 *  @regs : pointer to the structure with parameters given by ethtool for
5352 *  dumping the registers.
5353 *  @reg_space: The input argument into which all the registers are dumped.
5354 *  Description:
5355 *  Dumps the entire register space of xFrame NIC into the user given
5356 *  buffer area.
5357 * Return value :
5358 * void .
5359 */
5360
5361static void s2io_ethtool_gregs(struct net_device *dev,
5362                               struct ethtool_regs *regs, void *space)
5363{
5364        int i;
5365        u64 reg;
5366        u8 *reg_space = (u8 *)space;
5367        struct s2io_nic *sp = netdev_priv(dev);
5368
5369        regs->len = XENA_REG_SPACE;
5370        regs->version = sp->pdev->subsystem_device;
5371
5372        for (i = 0; i < regs->len; i += 8) {
5373                reg = readq(sp->bar0 + i);
5374                memcpy((reg_space + i), &reg, 8);
5375        }
5376}
5377
5378/*
5379 *  s2io_set_led - control NIC led
5380 */
5381static void s2io_set_led(struct s2io_nic *sp, bool on)
5382{
5383        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5384        u16 subid = sp->pdev->subsystem_device;
5385        u64 val64;
5386
5387        if ((sp->device_type == XFRAME_II_DEVICE) ||
5388            ((subid & 0xFF) >= 0x07)) {
5389                val64 = readq(&bar0->gpio_control);
5390                if (on)
5391                        val64 |= GPIO_CTRL_GPIO_0;
5392                else
5393                        val64 &= ~GPIO_CTRL_GPIO_0;
5394
5395                writeq(val64, &bar0->gpio_control);
5396        } else {
5397                val64 = readq(&bar0->adapter_control);
5398                if (on)
5399                        val64 |= ADAPTER_LED_ON;
5400                else
5401                        val64 &= ~ADAPTER_LED_ON;
5402
5403                writeq(val64, &bar0->adapter_control);
5404        }
5405
5406}
5407
5408/**
5409 * s2io_ethtool_set_led - To physically identify the nic on the system.
5410 * @dev : network device
5411 * @state: led setting
5412 *
5413 * Description: Used to physically identify the NIC on the system.
5414 * The Link LED will blink for a time specified by the user for
5415 * identification.
5416 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5417 * identification is possible only if it's link is up.
5418 */
5419
5420static int s2io_ethtool_set_led(struct net_device *dev,
5421                                enum ethtool_phys_id_state state)
5422{
5423        struct s2io_nic *sp = netdev_priv(dev);
5424        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5425        u16 subid = sp->pdev->subsystem_device;
5426
5427        if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5428                u64 val64 = readq(&bar0->adapter_control);
5429                if (!(val64 & ADAPTER_CNTL_EN)) {
5430                        pr_err("Adapter Link down, cannot blink LED\n");
5431                        return -EAGAIN;
5432                }
5433        }
5434
5435        switch (state) {
5436        case ETHTOOL_ID_ACTIVE:
5437                sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5438                return 1;       /* cycle on/off once per second */
5439
5440        case ETHTOOL_ID_ON:
5441                s2io_set_led(sp, true);
5442                break;
5443
5444        case ETHTOOL_ID_OFF:
5445                s2io_set_led(sp, false);
5446                break;
5447
5448        case ETHTOOL_ID_INACTIVE:
5449                if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5450                        writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5451        }
5452
5453        return 0;
5454}
5455
5456static void s2io_ethtool_gringparam(struct net_device *dev,
5457                                    struct ethtool_ringparam *ering)
5458{
5459        struct s2io_nic *sp = netdev_priv(dev);
5460        int i, tx_desc_count = 0, rx_desc_count = 0;
5461
5462        if (sp->rxd_mode == RXD_MODE_1) {
5463                ering->rx_max_pending = MAX_RX_DESC_1;
5464                ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5465        } else {
5466                ering->rx_max_pending = MAX_RX_DESC_2;
5467                ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5468        }
5469
5470        ering->tx_max_pending = MAX_TX_DESC;
5471
5472        for (i = 0; i < sp->config.rx_ring_num; i++)
5473                rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5474        ering->rx_pending = rx_desc_count;
5475        ering->rx_jumbo_pending = rx_desc_count;
5476
5477        for (i = 0; i < sp->config.tx_fifo_num; i++)
5478                tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5479        ering->tx_pending = tx_desc_count;
5480        DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5481}
5482
5483/**
5484 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5485 * @sp : private member of the device structure, which is a pointer to the
5486 *      s2io_nic structure.
5487 * @ep : pointer to the structure with pause parameters given by ethtool.
5488 * Description:
5489 * Returns the Pause frame generation and reception capability of the NIC.
5490 * Return value:
5491 *  void
5492 */
5493static void s2io_ethtool_getpause_data(struct net_device *dev,
5494                                       struct ethtool_pauseparam *ep)
5495{
5496        u64 val64;
5497        struct s2io_nic *sp = netdev_priv(dev);
5498        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5499
5500        val64 = readq(&bar0->rmac_pause_cfg);
5501        if (val64 & RMAC_PAUSE_GEN_ENABLE)
5502                ep->tx_pause = true;
5503        if (val64 & RMAC_PAUSE_RX_ENABLE)
5504                ep->rx_pause = true;
5505        ep->autoneg = false;
5506}
5507
5508/**
5509 * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5510 * @sp : private member of the device structure, which is a pointer to the
5511 *      s2io_nic structure.
5512 * @ep : pointer to the structure with pause parameters given by ethtool.
5513 * Description:
5514 * It can be used to set or reset Pause frame generation or reception
5515 * support of the NIC.
5516 * Return value:
5517 * int, returns 0 on Success
5518 */
5519
5520static int s2io_ethtool_setpause_data(struct net_device *dev,
5521                                      struct ethtool_pauseparam *ep)
5522{
5523        u64 val64;
5524        struct s2io_nic *sp = netdev_priv(dev);
5525        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5526
5527        val64 = readq(&bar0->rmac_pause_cfg);
5528        if (ep->tx_pause)
5529                val64 |= RMAC_PAUSE_GEN_ENABLE;
5530        else
5531                val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5532        if (ep->rx_pause)
5533                val64 |= RMAC_PAUSE_RX_ENABLE;
5534        else
5535                val64 &= ~RMAC_PAUSE_RX_ENABLE;
5536        writeq(val64, &bar0->rmac_pause_cfg);
5537        return 0;
5538}
5539
5540/**
5541 * read_eeprom - reads 4 bytes of data from user given offset.
5542 * @sp : private member of the device structure, which is a pointer to the
5543 *      s2io_nic structure.
5544 * @off : offset at which the data must be written
5545 * @data : Its an output parameter where the data read at the given
5546 *      offset is stored.
5547 * Description:
5548 * Will read 4 bytes of data from the user given offset and return the
5549 * read data.
5550 * NOTE: Will allow to read only part of the EEPROM visible through the
5551 *   I2C bus.
5552 * Return value:
5553 *  -1 on failure and 0 on success.
5554 */
5555
5556#define S2IO_DEV_ID             5
5557static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5558{
5559        int ret = -1;
5560        u32 exit_cnt = 0;
5561        u64 val64;
5562        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5563
5564        if (sp->device_type == XFRAME_I_DEVICE) {
5565                val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5566                        I2C_CONTROL_ADDR(off) |
5567                        I2C_CONTROL_BYTE_CNT(0x3) |
5568                        I2C_CONTROL_READ |
5569                        I2C_CONTROL_CNTL_START;
5570                SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5571
5572                while (exit_cnt < 5) {
5573                        val64 = readq(&bar0->i2c_control);
5574                        if (I2C_CONTROL_CNTL_END(val64)) {
5575                                *data = I2C_CONTROL_GET_DATA(val64);
5576                                ret = 0;
5577                                break;
5578                        }
5579                        msleep(50);
5580                        exit_cnt++;
5581                }
5582        }
5583
5584        if (sp->device_type == XFRAME_II_DEVICE) {
5585                val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5586                        SPI_CONTROL_BYTECNT(0x3) |
5587                        SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5588                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5589                val64 |= SPI_CONTROL_REQ;
5590                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5591                while (exit_cnt < 5) {
5592                        val64 = readq(&bar0->spi_control);
5593                        if (val64 & SPI_CONTROL_NACK) {
5594                                ret = 1;
5595                                break;
5596                        } else if (val64 & SPI_CONTROL_DONE) {
5597                                *data = readq(&bar0->spi_data);
5598                                *data &= 0xffffff;
5599                                ret = 0;
5600                                break;
5601                        }
5602                        msleep(50);
5603                        exit_cnt++;
5604                }
5605        }
5606        return ret;
5607}
5608
5609/**
5610 *  write_eeprom - actually writes the relevant part of the data value.
5611 *  @sp : private member of the device structure, which is a pointer to the
5612 *       s2io_nic structure.
5613 *  @off : offset at which the data must be written
5614 *  @data : The data that is to be written
5615 *  @cnt : Number of bytes of the data that are actually to be written into
5616 *  the Eeprom. (max of 3)
5617 * Description:
5618 *  Actually writes the relevant part of the data value into the Eeprom
5619 *  through the I2C bus.
5620 * Return value:
5621 *  0 on success, -1 on failure.
5622 */
5623
5624static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5625{
5626        int exit_cnt = 0, ret = -1;
5627        u64 val64;
5628        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5629
5630        if (sp->device_type == XFRAME_I_DEVICE) {
5631                val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5632                        I2C_CONTROL_ADDR(off) |
5633                        I2C_CONTROL_BYTE_CNT(cnt) |
5634                        I2C_CONTROL_SET_DATA((u32)data) |
5635                        I2C_CONTROL_CNTL_START;
5636                SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5637
5638                while (exit_cnt < 5) {
5639                        val64 = readq(&bar0->i2c_control);
5640                        if (I2C_CONTROL_CNTL_END(val64)) {
5641                                if (!(val64 & I2C_CONTROL_NACK))
5642                                        ret = 0;
5643                                break;
5644                        }
5645                        msleep(50);
5646                        exit_cnt++;
5647                }
5648        }
5649
5650        if (sp->device_type == XFRAME_II_DEVICE) {
5651                int write_cnt = (cnt == 8) ? 0 : cnt;
5652                writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5653
5654                val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5655                        SPI_CONTROL_BYTECNT(write_cnt) |
5656                        SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5657                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5658                val64 |= SPI_CONTROL_REQ;
5659                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5660                while (exit_cnt < 5) {
5661                        val64 = readq(&bar0->spi_control);
5662                        if (val64 & SPI_CONTROL_NACK) {
5663                                ret = 1;
5664                                break;
5665                        } else if (val64 & SPI_CONTROL_DONE) {
5666                                ret = 0;
5667                                break;
5668                        }
5669                        msleep(50);
5670                        exit_cnt++;
5671                }
5672        }
5673        return ret;
5674}
/* Read the PCI Vital Product Data and extract the product name and
 * serial number into nic->product_name / nic->serial_num. On any
 * failure the defaults set below are left in place. */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;	/* config-space offset of the VPD capability */
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	/* Fallback if the serial number cannot be parsed out of the VPD. */
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* Fetch 256 bytes of VPD, one dword per iteration: write the VPD
	 * address, clear the flag byte, then poll it (0x80 = data ready). */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		/* Cast is alignment-safe: kmalloc'd buffer, i % 4 == 0. */
		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter */
		for (cnt = 0; cnt < 252; cnt++) {
			/* Look for the "SN" VPD keyword; the byte after it
			 * is the field length, then the value. */
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* vpd_data[1] is presumably the product-name descriptor length —
	 * confirm against the PCI VPD large-resource format. */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5745
5746/**
5747 *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5748 *  @sp : private member of the device structure, which is a pointer to the
5749 *  s2io_nic structure.
5750 *  @eeprom : pointer to the user level structure provided by ethtool,
5751 *  containing all relevant information.
5752 *  @data_buf : user defined value to be written into Eeprom.
5753 *  Description: Reads the values stored in the Eeprom at given offset
5754 *  for a given length. Stores these values int the input argument data
5755 *  buffer 'data_buf' and returns these to the caller (ethtool.)
5756 *  Return value:
5757 *  int  0 on success
5758 */
5759
5760static int s2io_ethtool_geeprom(struct net_device *dev,
5761                                struct ethtool_eeprom *eeprom, u8 * data_buf)
5762{
5763        u32 i, valid;
5764        u64 data;
5765        struct s2io_nic *sp = netdev_priv(dev);
5766
5767        eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5768
5769        if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5770                eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5771
5772        for (i = 0; i < eeprom->len; i += 4) {
5773                if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5774                        DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5775                        return -EFAULT;
5776                }
5777                valid = INV(data);
5778                memcpy((data_buf + i), &valid, 4);
5779        }
5780        return 0;
5781}
5782
5783/**
5784 *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5785 *  @sp : private member of the device structure, which is a pointer to the
5786 *  s2io_nic structure.
5787 *  @eeprom : pointer to the user level structure provided by ethtool,
5788 *  containing all relevant information.
5789 *  @data_buf ; user defined value to be written into Eeprom.
5790 *  Description:
5791 *  Tries to write the user provided value in the Eeprom, at the offset
5792 *  given by the user.
5793 *  Return value:
5794 *  0 on success, -EFAULT on failure.
5795 */
5796
5797static int s2io_ethtool_seeprom(struct net_device *dev,
5798                                struct ethtool_eeprom *eeprom,
5799                                u8 *data_buf)
5800{
5801        int len = eeprom->len, cnt = 0;
5802        u64 valid = 0, data;
5803        struct s2io_nic *sp = netdev_priv(dev);
5804
5805        if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5806                DBG_PRINT(ERR_DBG,
5807                          "ETHTOOL_WRITE_EEPROM Err: "
5808                          "Magic value is wrong, it is 0x%x should be 0x%x\n",
5809                          (sp->pdev->vendor | (sp->pdev->device << 16)),
5810                          eeprom->magic);
5811                return -EFAULT;
5812        }
5813
5814        while (len) {
5815                data = (u32)data_buf[cnt] & 0x000000FF;
5816                if (data)
5817                        valid = (u32)(data << 24);
5818                else
5819                        valid = data;
5820
5821                if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5822                        DBG_PRINT(ERR_DBG,
5823                                  "ETHTOOL_WRITE_EEPROM Err: "
5824                                  "Cannot write into the specified offset\n");
5825                        return -EFAULT;
5826                }
5827                cnt++;
5828                len--;
5829        }
5830
5831        return 0;
5832}
5833
5834/**
5835 * s2io_register_test - reads and writes into all clock domains.
5836 * @sp : private member of the device structure, which is a pointer to the
5837 * s2io_nic structure.
5838 * @data : variable that returns the result of each of the test conducted b
5839 * by the driver.
5840 * Description:
5841 * Read and write into all clock domains. The NIC has 3 clock domains,
5842 * see that registers in all the three regions are accessible.
5843 * Return value:
5844 * 0 on success.
5845 */
5846
5847static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5848{
5849        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5850        u64 val64 = 0, exp_val;
5851        int fail = 0;
5852
5853        val64 = readq(&bar0->pif_rd_swapper_fb);
5854        if (val64 != 0x123456789abcdefULL) {
5855                fail = 1;
5856                DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5857        }
5858
5859        val64 = readq(&bar0->rmac_pause_cfg);
5860        if (val64 != 0xc000ffff00000000ULL) {
5861                fail = 1;
5862                DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5863        }
5864
5865        val64 = readq(&bar0->rx_queue_cfg);
5866        if (sp->device_type == XFRAME_II_DEVICE)
5867                exp_val = 0x0404040404040404ULL;
5868        else
5869                exp_val = 0x0808080808080808ULL;
5870        if (val64 != exp_val) {
5871                fail = 1;
5872                DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5873        }
5874
5875        val64 = readq(&bar0->xgxs_efifo_cfg);
5876        if (val64 != 0x000000001923141EULL) {
5877                fail = 1;
5878                DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5879        }
5880
5881        val64 = 0x5A5A5A5A5A5A5A5AULL;
5882        writeq(val64, &bar0->xmsi_data);
5883        val64 = readq(&bar0->xmsi_data);
5884        if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5885                fail = 1;
5886                DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5887        }
5888
5889        val64 = 0xA5A5A5A5A5A5A5A5ULL;
5890        writeq(val64, &bar0->xmsi_data);
5891        val64 = readq(&bar0->xmsi_data);
5892        if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5893                fail = 1;
5894                DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5895        }
5896
5897        *data = fail;
5898        return fail;
5899}
5900
5901/**
5902 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5903 * @sp : private member of the device structure, which is a pointer to the
5904 * s2io_nic structure.
5905 * @data:variable that returns the result of each of the test conducted by
5906 * the driver.
5907 * Description:
5908 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5909 * register.
5910 * Return value:
5911 * 0 on success.
5912 */
5913
5914static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5915{
5916        int fail = 0;
5917        u64 ret_data, org_4F0, org_7F0;
5918        u8 saved_4F0 = 0, saved_7F0 = 0;
5919        struct net_device *dev = sp->dev;
5920
5921        /* Test Write Error at offset 0 */
5922        /* Note that SPI interface allows write access to all areas
5923         * of EEPROM. Hence doing all negative testing only for Xframe I.
5924         */
5925        if (sp->device_type == XFRAME_I_DEVICE)
5926                if (!write_eeprom(sp, 0, 0, 3))
5927                        fail = 1;
5928
5929        /* Save current values at offsets 0x4F0 and 0x7F0 */
5930        if (!read_eeprom(sp, 0x4F0, &org_4F0))
5931                saved_4F0 = 1;
5932        if (!read_eeprom(sp, 0x7F0, &org_7F0))
5933                saved_7F0 = 1;
5934
5935        /* Test Write at offset 4f0 */
5936        if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5937                fail = 1;
5938        if (read_eeprom(sp, 0x4F0, &ret_data))
5939                fail = 1;
5940
5941        if (ret_data != 0x012345) {
5942                DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5943                          "Data written %llx Data read %llx\n",
5944                          dev->name, (unsigned long long)0x12345,
5945                          (unsigned long long)ret_data);
5946                fail = 1;
5947        }
5948
5949        /* Reset the EEPROM data go FFFF */
5950        write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5951
5952        /* Test Write Request Error at offset 0x7c */
5953        if (sp->device_type == XFRAME_I_DEVICE)
5954                if (!write_eeprom(sp, 0x07C, 0, 3))
5955                        fail = 1;
5956
5957        /* Test Write Request at offset 0x7f0 */
5958        if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5959                fail = 1;
5960        if (read_eeprom(sp, 0x7F0, &ret_data))
5961                fail = 1;
5962
5963        if (ret_data != 0x012345) {
5964                DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5965                          "Data written %llx Data read %llx\n",
5966                          dev->name, (unsigned long long)0x12345,
5967                          (unsigned long long)ret_data);
5968                fail = 1;
5969        }
5970
5971        /* Reset the EEPROM data go FFFF */
5972        write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5973
5974        if (sp->device_type == XFRAME_I_DEVICE) {
5975                /* Test Write Error at offset 0x80 */
5976                if (!write_eeprom(sp, 0x080, 0, 3))
5977                        fail = 1;
5978
5979                /* Test Write Error at offset 0xfc */
5980                if (!write_eeprom(sp, 0x0FC, 0, 3))
5981                        fail = 1;
5982
5983                /* Test Write Error at offset 0x100 */
5984                if (!write_eeprom(sp, 0x100, 0, 3))
5985                        fail = 1;
5986
5987                /* Test Write Error at offset 4ec */
5988                if (!write_eeprom(sp, 0x4EC, 0, 3))
5989                        fail = 1;
5990        }
5991
5992        /* Restore values at offsets 0x4F0 and 0x7F0 */
5993        if (saved_4F0)
5994                write_eeprom(sp, 0x4F0, org_4F0, 3);
5995        if (saved_7F0)
5996                write_eeprom(sp, 0x7F0, org_7F0, 3);
5997
5998        *data = fail;
5999        return fail;
6000}
6001
6002/**
6003 * s2io_bist_test - invokes the MemBist test of the card .
6004 * @sp : private member of the device structure, which is a pointer to the
6005 * s2io_nic structure.
6006 * @data:variable that returns the result of each of the test conducted by
6007 * the driver.
6008 * Description:
6009 * This invokes the MemBist test of the card. We give around
6010 * 2 secs time for the Test to complete. If it's still not complete
6011 * within this peiod, we consider that the test failed.
6012 * Return value:
6013 * 0 on success and -1 on failure.
6014 */
6015
6016static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6017{
6018        u8 bist = 0;
6019        int cnt = 0, ret = -1;
6020
6021        pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6022        bist |= PCI_BIST_START;
6023        pci_write_config_word(sp->pdev, PCI_BIST, bist);
6024
6025        while (cnt < 20) {
6026                pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6027                if (!(bist & PCI_BIST_START)) {
6028                        *data = (bist & PCI_BIST_CODE_MASK);
6029                        ret = 0;
6030                        break;
6031                }
6032                msleep(100);
6033                cnt++;
6034        }
6035
6036        return ret;
6037}
6038
6039/**
6040 * s2io_link_test - verifies the link state of the nic
6041 * @sp ; private member of the device structure, which is a pointer to the
6042 * s2io_nic structure.
6043 * @data: variable that returns the result of each of the test conducted by
6044 * the driver.
6045 * Description:
6046 * The function verifies the link state of the NIC and updates the input
6047 * argument 'data' appropriately.
6048 * Return value:
6049 * 0 on success.
6050 */
6051
6052static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6053{
6054        struct XENA_dev_config __iomem *bar0 = sp->bar0;
6055        u64 val64;
6056
6057        val64 = readq(&bar0->adapter_status);
6058        if (!(LINK_IS_UP(val64)))
6059                *data = 1;
6060        else
6061                *data = 0;
6062
6063        return *data;
6064}
6065
6066/**
6067 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6068 * @sp: private member of the device structure, which is a pointer to the
6069 * s2io_nic structure.
6070 * @data: variable that returns the result of each of the test
6071 * conducted by the driver.
6072 * Description:
6073 *  This is one of the offline test that tests the read and write
6074 *  access to the RldRam chip on the NIC.
6075 * Return value:
6076 *  0 on success.
6077 */
6078
6079static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6080{
6081        struct XENA_dev_config __iomem *bar0 = sp->bar0;
6082        u64 val64;
6083        int cnt, iteration = 0, test_fail = 0;
6084
6085        val64 = readq(&bar0->adapter_control);
6086        val64 &= ~ADAPTER_ECC_EN;
6087        writeq(val64, &bar0->adapter_control);
6088
6089        val64 = readq(&bar0->mc_rldram_test_ctrl);
6090        val64 |= MC_RLDRAM_TEST_MODE;
6091        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6092
6093        val64 = readq(&bar0->mc_rldram_mrs);
6094        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6095        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6096
6097        val64 |= MC_RLDRAM_MRS_ENABLE;
6098        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6099
6100        while (iteration < 2) {
6101                val64 = 0x55555555aaaa0000ULL;
6102                if (iteration == 1)
6103                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
6104                writeq(val64, &bar0->mc_rldram_test_d0);
6105
6106                val64 = 0xaaaa5a5555550000ULL;
6107                if (iteration == 1)
6108                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
6109                writeq(val64, &bar0->mc_rldram_test_d1);
6110
6111                val64 = 0x55aaaaaaaa5a0000ULL;
6112                if (iteration == 1)
6113                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
6114                writeq(val64, &bar0->mc_rldram_test_d2);
6115
6116                val64 = (u64) (0x0000003ffffe0100ULL);
6117                writeq(val64, &bar0->mc_rldram_test_add);
6118
6119                val64 = MC_RLDRAM_TEST_MODE |
6120                        MC_RLDRAM_TEST_WRITE |
6121                        MC_RLDRAM_TEST_GO;
6122                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6123
6124                for (cnt = 0; cnt < 5; cnt++) {
6125                        val64 = readq(&bar0->mc_rldram_test_ctrl);
6126                        if (val64 & MC_RLDRAM_TEST_DONE)
6127                                break;
6128                        msleep(200);
6129                }
6130
6131                if (cnt == 5)
6132                        break;
6133
6134                val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6135                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6136
6137                for (cnt = 0; cnt < 5; cnt++) {
6138                        val64 = readq(&bar0->mc_rldram_test_ctrl);
6139                        if (val64 & MC_RLDRAM_TEST_DONE)
6140                                break;
6141                        msleep(500);
6142                }
6143
6144                if (cnt == 5)
6145                        break;
6146
6147                val64 = readq(&bar0->mc_rldram_test_ctrl);
6148                if (!(val64 & MC_RLDRAM_TEST_PASS))
6149                        test_fail = 1;
6150
6151                iteration++;
6152        }
6153
6154        *data = test_fail;
6155
6156        /* Bring the adapter out of test mode */
6157        SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6158
6159        return test_fail;
6160}
6161
6162/**
6163 *  s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6164 *  @sp : private member of the device structure, which is a pointer to the
6165 *  s2io_nic structure.
6166 *  @ethtest : pointer to a ethtool command specific structure that will be
6167 *  returned to the user.
6168 *  @data : variable that returns the result of each of the test
6169 * conducted by the driver.
6170 * Description:
6171 *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6172 *  the health of the card.
6173 * Return value:
6174 *  void
6175 */
6176
6177static void s2io_ethtool_test(struct net_device *dev,
6178                              struct ethtool_test *ethtest,
6179                              uint64_t *data)
6180{
6181        struct s2io_nic *sp = netdev_priv(dev);
6182        int orig_state = netif_running(sp->dev);
6183
6184        if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6185                /* Offline Tests. */
6186                if (orig_state)
6187                        s2io_close(sp->dev);
6188
6189                if (s2io_register_test(sp, &data[0]))
6190                        ethtest->flags |= ETH_TEST_FL_FAILED;
6191
6192                s2io_reset(sp);
6193
6194                if (s2io_rldram_test(sp, &data[3]))
6195                        ethtest->flags |= ETH_TEST_FL_FAILED;
6196
6197                s2io_reset(sp);
6198
6199                if (s2io_eeprom_test(sp, &data[1]))
6200                        ethtest->flags |= ETH_TEST_FL_FAILED;
6201
6202                if (s2io_bist_test(sp, &data[4]))
6203                        ethtest->flags |= ETH_TEST_FL_FAILED;
6204
6205                if (orig_state)
6206                        s2io_open(sp->dev);
6207
6208                data[2] = 0;
6209        } else {
6210                /* Online Tests. */
6211                if (!orig_state) {
6212                        DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6213                                  dev->name);
6214                        data[0] = -1;
6215                        data[1] = -1;
6216                        data[2] = -1;
6217                        data[3] = -1;
6218                        data[4] = -1;
6219                }
6220
6221                if (s2io_link_test(sp, &data[2]))
6222                        ethtest->flags |= ETH_TEST_FL_FAILED;
6223
6224                data[0] = 0;
6225                data[1] = 0;
6226                data[3] = 0;
6227                data[4] = 0;
6228        }
6229}
6230
/*
 * s2io_get_ethtool_stats - fill the ethtool -S statistics array.
 *
 * NOTE: the order of the tmp_stats[i++] assignments below must match the
 * string tables (ethtool_xena_stats_keys, ethtool_enhanced_stats_keys,
 * ethtool_driver_stats_keys) exactly — do not reorder.  Several hardware
 * counters are split into a 32-bit value plus a 32-bit overflow register,
 * which are combined here into one 64-bit number.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats,
                                   u64 *tmp_stats)
{
        int i = 0, k;
        struct s2io_nic *sp = netdev_priv(dev);
        struct stat_block *stats = sp->mac_control.stats_info;
        struct swStat *swstats = &stats->sw_stat;
        struct xpakStat *xstats = &stats->xpak_stat;

        /* Snapshot the hardware counters into the stat block first */
        s2io_updt_stats(sp);

        /* Tx MAC statistics */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
                le32_to_cpu(stats->tmac_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
                le32_to_cpu(stats->tmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
                le32_to_cpu(stats->tmac_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
                le32_to_cpu(stats->tmac_bcst_frms);
        tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
                le32_to_cpu(stats->tmac_ttl_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
                le32_to_cpu(stats->tmac_ucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
                le32_to_cpu(stats->tmac_nucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
                le32_to_cpu(stats->tmac_any_err_frms);
        tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
        tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
                le32_to_cpu(stats->tmac_vld_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stats->tmac_drop_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
                le32_to_cpu(stats->tmac_icmp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
                le32_to_cpu(stats->tmac_rst_tcp);
        tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
        tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
                le32_to_cpu(stats->tmac_udp);

        /* Rx MAC statistics */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
                le32_to_cpu(stats->rmac_vld_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
                le32_to_cpu(stats->rmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
                le32_to_cpu(stats->rmac_vld_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
                le32_to_cpu(stats->rmac_vld_bcst_frms);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
                le32_to_cpu(stats->rmac_ttl_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
                | le32_to_cpu(stats->rmac_accepted_ucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
                << 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
                le32_to_cpu(stats->rmac_discarded_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_drop_events_oflow)
                << 32 | le32_to_cpu(stats->rmac_drop_events);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
                le32_to_cpu(stats->rmac_usized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
                le32_to_cpu(stats->rmac_osized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
                le32_to_cpu(stats->rmac_frag_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
                le32_to_cpu(stats->rmac_jabber_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
                le32_to_cpu(stats->rmac_ip);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stats->rmac_drop_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
                le32_to_cpu(stats->rmac_icmp);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
                le32_to_cpu(stats->rmac_udp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
                le32_to_cpu(stats->rmac_err_drp_udp);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
        tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
        tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
        tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
        tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
        tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
        tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
        tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
        tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
                le32_to_cpu(stats->rmac_pause_cnt);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
        tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
                le32_to_cpu(stats->rmac_accepted_ip);
        tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);

        /* PCI/DMA request counters */
        tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

        /* Enhanced statistics exist only for Hercules */
        if (sp->device_type == XFRAME_II_DEVICE) {
                tmp_stats[i++] =
                        le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
                tmp_stats[i++] =
                        le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
                tmp_stats[i++] =
                        le64_to_cpu(stats->rmac_ttl_8192_max_frms);
                tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
                tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
                tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
                tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
                tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
                tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
                tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
                tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
                tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
                tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
        }

        /* Driver-maintained software statistics */
        tmp_stats[i++] = 0;
        tmp_stats[i++] = swstats->single_ecc_errs;
        tmp_stats[i++] = swstats->double_ecc_errs;
        tmp_stats[i++] = swstats->parity_err_cnt;
        tmp_stats[i++] = swstats->serious_err_cnt;
        tmp_stats[i++] = swstats->soft_reset_cnt;
        tmp_stats[i++] = swstats->fifo_full_cnt;
        for (k = 0; k < MAX_RX_RINGS; k++)
                tmp_stats[i++] = swstats->ring_full_cnt[k];

        /* XPAK transceiver alarm/warning counters */
        tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
        tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
        tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
        tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
        tmp_stats[i++] = xstats->alarm_laser_output_power_high;
        tmp_stats[i++] = xstats->alarm_laser_output_power_low;
        tmp_stats[i++] = xstats->warn_transceiver_temp_high;
        tmp_stats[i++] = xstats->warn_transceiver_temp_low;
        tmp_stats[i++] = xstats->warn_laser_bias_current_high;
        tmp_stats[i++] = xstats->warn_laser_bias_current_low;
        tmp_stats[i++] = xstats->warn_laser_output_power_high;
        tmp_stats[i++] = xstats->warn_laser_output_power_low;
        tmp_stats[i++] = swstats->clubbed_frms_cnt;
        tmp_stats[i++] = swstats->sending_both;
        tmp_stats[i++] = swstats->outof_sequence_pkts;
        tmp_stats[i++] = swstats->flush_max_pkts;
        if (swstats->num_aggregations) {
                u64 tmp = swstats->sum_avg_pkts_aggregated;
                int count = 0;
                /*
                 * Since 64-bit divide does not work on all platforms,
                 * do repeated subtraction.
                 */
                while (tmp >= swstats->num_aggregations) {
                        tmp -= swstats->num_aggregations;
                        count++;
                }
                tmp_stats[i++] = count;
        } else
                tmp_stats[i++] = 0;
        tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
        tmp_stats[i++] = swstats->pci_map_fail_cnt;
        tmp_stats[i++] = swstats->watchdog_timer_cnt;
        tmp_stats[i++] = swstats->mem_allocated;
        tmp_stats[i++] = swstats->mem_freed;
        tmp_stats[i++] = swstats->link_up_cnt;
        tmp_stats[i++] = swstats->link_down_cnt;
        tmp_stats[i++] = swstats->link_up_time;
        tmp_stats[i++] = swstats->link_down_time;

        /* Per-cause Tx error counters */
        tmp_stats[i++] = swstats->tx_buf_abort_cnt;
        tmp_stats[i++] = swstats->tx_desc_abort_cnt;
        tmp_stats[i++] = swstats->tx_parity_err_cnt;
        tmp_stats[i++] = swstats->tx_link_loss_cnt;
        tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

        /* Per-cause Rx error counters */
        tmp_stats[i++] = swstats->rx_parity_err_cnt;
        tmp_stats[i++] = swstats->rx_abort_cnt;
        tmp_stats[i++] = swstats->rx_parity_abort_cnt;
        tmp_stats[i++] = swstats->rx_rda_fail_cnt;
        tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
        tmp_stats[i++] = swstats->rx_fcs_err_cnt;
        tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
        tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
        tmp_stats[i++] = swstats->rx_unkn_err_cnt;
        tmp_stats[i++] = swstats->tda_err_cnt;
        tmp_stats[i++] = swstats->pfc_err_cnt;
        tmp_stats[i++] = swstats->pcc_err_cnt;
        tmp_stats[i++] = swstats->tti_err_cnt;
        tmp_stats[i++] = swstats->tpa_err_cnt;
        tmp_stats[i++] = swstats->sm_err_cnt;
        tmp_stats[i++] = swstats->lso_err_cnt;
        tmp_stats[i++] = swstats->mac_tmac_err_cnt;
        tmp_stats[i++] = swstats->mac_rmac_err_cnt;
        tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
        tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
        tmp_stats[i++] = swstats->rc_err_cnt;
        tmp_stats[i++] = swstats->prc_pcix_err_cnt;
        tmp_stats[i++] = swstats->rpa_err_cnt;
        tmp_stats[i++] = swstats->rda_err_cnt;
        tmp_stats[i++] = swstats->rti_err_cnt;
        tmp_stats[i++] = swstats->mc_err_cnt;
}
6507
/* Size in bytes of the register dump returned by s2io_ethtool_gregs() */
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
        return XENA_REG_SPACE;
}
6512
6513
/* Size in bytes of the EEPROM exposed via ethtool get/set_eeprom */
static int s2io_get_eeprom_len(struct net_device *dev)
{
        return XENA_EEPROM_SPACE;
}
6518
6519static int s2io_get_sset_count(struct net_device *dev, int sset)
6520{
6521        struct s2io_nic *sp = netdev_priv(dev);
6522
6523        switch (sset) {
6524        case ETH_SS_TEST:
6525                return S2IO_TEST_LEN;
6526        case ETH_SS_STATS:
6527                switch (sp->device_type) {
6528                case XFRAME_I_DEVICE:
6529                        return XFRAME_I_STAT_LEN;
6530                case XFRAME_II_DEVICE:
6531                        return XFRAME_II_STAT_LEN;
6532                default:
6533                        return 0;
6534                }
6535        default:
6536                return -EOPNOTSUPP;
6537        }
6538}
6539
6540static void s2io_ethtool_get_strings(struct net_device *dev,
6541                                     u32 stringset, u8 *data)
6542{
6543        int stat_size = 0;
6544        struct s2io_nic *sp = netdev_priv(dev);
6545
6546        switch (stringset) {
6547        case ETH_SS_TEST:
6548                memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6549                break;
6550        case ETH_SS_STATS:
6551                stat_size = sizeof(ethtool_xena_stats_keys);
6552                memcpy(data, &ethtool_xena_stats_keys, stat_size);
6553                if (sp->device_type == XFRAME_II_DEVICE) {
6554                        memcpy(data + stat_size,
6555                               &ethtool_enhanced_stats_keys,
6556                               sizeof(ethtool_enhanced_stats_keys));
6557                        stat_size += sizeof(ethtool_enhanced_stats_keys);
6558                }
6559
6560                memcpy(data + stat_size, &ethtool_driver_stats_keys,
6561                       sizeof(ethtool_driver_stats_keys));
6562        }
6563}
6564
6565static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6566{
6567        struct s2io_nic *sp = netdev_priv(dev);
6568        netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6569
6570        if (changed && netif_running(dev)) {
6571                int rc;
6572
6573                s2io_stop_all_tx_queue(sp);
6574                s2io_card_down(sp);
6575                dev->features = features;
6576                rc = s2io_card_up(sp);
6577                if (rc)
6578                        s2io_reset(sp);
6579                else
6580                        s2io_start_all_tx_queue(sp);
6581
6582                return rc ? rc : 1;
6583        }
6584
6585        return 0;
6586}
6587
/* ethtool entry points exported by this driver */
static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo = s2io_ethtool_gdrvinfo,
        .get_regs_len = s2io_ethtool_get_regs_len,
        .get_regs = s2io_ethtool_gregs,
        .get_link = ethtool_op_get_link,
        .get_eeprom_len = s2io_get_eeprom_len,
        .get_eeprom = s2io_ethtool_geeprom,
        .set_eeprom = s2io_ethtool_seeprom,
        .get_ringparam = s2io_ethtool_gringparam,
        .get_pauseparam = s2io_ethtool_getpause_data,
        .set_pauseparam = s2io_ethtool_setpause_data,
        .self_test = s2io_ethtool_test,
        .get_strings = s2io_ethtool_get_strings,
        .set_phys_id = s2io_ethtool_set_led,
        .get_ethtool_stats = s2io_get_ethtool_stats,
        .get_sset_count = s2io_get_sset_count,
        .get_link_ksettings = s2io_ethtool_get_link_ksettings,
        .set_link_ksettings = s2io_ethtool_set_link_ksettings,
};
6607
6608/**
6609 *  s2io_ioctl - Entry point for the Ioctl
6610 *  @dev :  Device pointer.
6611 *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6612 *  a proprietary structure used to pass information to the driver.
6613 *  @cmd :  This is used to distinguish between the different commands that
6614 *  can be passed to the IOCTL functions.
6615 *  Description:
6616 *  Currently there are no special functionality supported in IOCTL, hence
6617 *  function always return EOPNOTSUPPORTED
6618 */
6619
6620static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6621{
6622        return -EOPNOTSUPP;
6623}
6624
6625/**
6626 *  s2io_change_mtu - entry point to change MTU size for the device.
6627 *   @dev : device pointer.
6628 *   @new_mtu : the new MTU size for the device.
6629 *   Description: A driver entry point to change MTU size for the device.
6630 *   Before changing the MTU the device must be stopped.
6631 *  Return value:
6632 *   0 on success and an appropriate (-)ve integer as defined in errno.h
6633 *   file on failure.
6634 */
6635
6636static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6637{
6638        struct s2io_nic *sp = netdev_priv(dev);
6639        int ret = 0;
6640
6641        dev->mtu = new_mtu;
6642        if (netif_running(dev)) {
6643                s2io_stop_all_tx_queue(sp);
6644                s2io_card_down(sp);
6645                ret = s2io_card_up(sp);
6646                if (ret) {
6647                        DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6648                                  __func__);
6649                        return ret;
6650                }
6651                s2io_wake_all_tx_queue(sp);
6652        } else { /* Device is down */
6653                struct XENA_dev_config __iomem *bar0 = sp->bar0;
6654                u64 val64 = new_mtu;
6655
6656                writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6657        }
6658
6659        return ret;
6660}
6661
6662/**
6663 * s2io_set_link - Set the LInk status
6664 * @data: long pointer to device private structue
6665 * Description: Sets the link status for the adapter
6666 */
6667
6668static void s2io_set_link(struct work_struct *work)
6669{
6670        struct s2io_nic *nic = container_of(work, struct s2io_nic,
6671                                            set_link_task);
6672        struct net_device *dev = nic->dev;
6673        struct XENA_dev_config __iomem *bar0 = nic->bar0;
6674        register u64 val64;
6675        u16 subid;
6676
6677        rtnl_lock();
6678
6679        if (!netif_running(dev))
6680                goto out_unlock;
6681
6682        if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6683                /* The card is being reset, no point doing anything */
6684                goto out_unlock;
6685        }
6686
6687        subid = nic->pdev->subsystem_device;
6688        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6689                /*
6690                 * Allow a small delay for the NICs self initiated
6691                 * cleanup to complete.
6692                 */
6693                msleep(100);
6694        }
6695
6696        val64 = readq(&bar0->adapter_status);
6697        if (LINK_IS_UP(val64)) {
6698                if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6699                        if (verify_xena_quiescence(nic)) {
6700                                val64 = readq(&bar0->adapter_control);
6701                                val64 |= ADAPTER_CNTL_EN;
6702                                writeq(val64, &bar0->adapter_control);
6703                                if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6704                                            nic->device_type, subid)) {
6705                                        val64 = readq(&bar0->gpio_control);
6706                                        val64 |= GPIO_CTRL_GPIO_0;
6707                                        writeq(val64, &bar0->gpio_control);
6708                                        val64 = readq(&bar0->gpio_control);
6709                                } else {
6710                                        val64 |= ADAPTER_LED_ON;
6711                                        writeq(val64, &bar0->adapter_control);
6712                                }
6713                                nic->device_enabled_once = true;
6714                        } else {
6715                                DBG_PRINT(ERR_DBG,
6716                                          "%s: Error: device is not Quiescent\n",
6717                                          dev->name);
6718                                s2io_stop_all_tx_queue(nic);
6719                        }
6720                }
6721                val64 = readq(&bar0->adapter_control);
6722                val64 |= ADAPTER_LED_ON;
6723                writeq(val64, &bar0->adapter_control);
6724                s2io_link(nic, LINK_UP);
6725        } else {
6726                if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6727                                                      subid)) {
6728                        val64 = readq(&bar0->gpio_control);
6729                        val64 &= ~GPIO_CTRL_GPIO_0;
6730                        writeq(val64, &bar0->gpio_control);
6731                        val64 = readq(&bar0->gpio_control);
6732                }
6733                /* turn off LED */
6734                val64 = readq(&bar0->adapter_control);
6735                val64 = val64 & (~ADAPTER_LED_ON);
6736                writeq(val64, &bar0->adapter_control);
6737                s2io_link(nic, LINK_DOWN);
6738        }
6739        clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6740
6741out_unlock:
6742        rtnl_unlock();
6743}
6744
/*
 * set_rxd_buffer_pointer - attach DMA-mapped buffer(s) to one Rx descriptor.
 *
 * Used while quiescing the card (see rxd_owner_bit_reset()): descriptors
 * whose Host_Control is zero get a freshly allocated skb mapped for DMA;
 * descriptors that already have an skb (*skb != NULL) simply reuse the
 * mapped addresses cached in *temp0/*temp1/*temp2, since the frames will
 * never be processed.  Returns 0 on success, -ENOMEM on allocation or
 * DMA-mapping failure (with any partial mappings undone).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				  struct buffAdd *ba,
				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
				  u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name, "1 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, (*skb)->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the cached mappings; the frame data is never
			 * consumed in this path.
			 */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name,
					  "2 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* Buffer 2 carries the payload (MTU + 4 bytes). */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			/* Buffer 0 holds the Ethernet header area. */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						  rxdp3->Buffer0_ptr)) {
				/* Unwind the Buffer 2 mapping made above. */
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						  rxdp3->Buffer1_ptr)) {
				/* Unwind both earlier mappings before bailing. */
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer0_ptr,
						 BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;

memalloc_failed:
	/* DMA mapping failed: account for and release the freshly
	 * allocated skb.
	 */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
6848
6849static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6850                                int size)
6851{
6852        struct net_device *dev = sp->dev;
6853        if (sp->rxd_mode == RXD_MODE_1) {
6854                rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6855        } else if (sp->rxd_mode == RXD_MODE_3B) {
6856                rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6857                rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6858                rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6859        }
6860}
6861
/*
 * rxd_owner_bit_reset - hand every Rx descriptor back to the hardware.
 *
 * Called while bringing the card down: the HW requires the Rx rings to be
 * replenished (ownership given back to the NIC with proper buffer sizes)
 * to avoid a ring bump, even though none of the frames will be processed.
 * Always returns 0; an -ENOMEM from set_rxd_buffer_pointer() just stops
 * the replenish early, which is acceptable on the teardown path.
 */
static  int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		/* +1 accounts for the block's link/trailer descriptor */
		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
				if (sp->rxd_mode == RXD_MODE_3B)
					ba = &ring->ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
							   &temp0_64,
							   &temp1_64,
							   &temp2_64,
							   size) == -ENOMEM) {
					/* Out of memory: abandon the rest of
					 * the replenish, not fatal here.
					 */
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				/* Ensure buffer pointers/sizes are visible to
				 * the device before it sees the ownership bit.
				 */
				dma_wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;

}
6910
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 *
 * Tries MSI-X first when configured: one vector per Rx ring plus an alarm
 * (Tx/error) vector.  On any MSI-X setup or request_irq() failure the
 * driver falls back to legacy INTA.  Returns 0 on success, -1 if even the
 * INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X enable failed: silently drop back to INTA */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
				    MSIX_RING_TYPE) {
					/* Per-ring Rx vector */
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_ring_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					   MSIX_ALARM_TYPE) {
					/* Alarm vector also services Tx */
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-TX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_fifo_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
				      sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						  "%s @Addr:0x%llx Data:0x%llx\n",
						  sp->desc[i],
						  (unsigned long long)
						  sp->msix_info[i].addr,
						  (unsigned long long)
						  ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Any vector failing invalidates the
					 * whole MSI-X setup: tear it down and
					 * retry with INTA below.
					 */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						  "%s:MSI-X-%d registration "
						  "failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						  "%s: Defaulting to INTA\n",
						  dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			/* msix_rx_cnt counted the alarm vector too, so report
			 * one less as the number of Rx vectors.
			 */
			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
			DBG_PRINT(INFO_DBG,
				  "MSI-X-TX entries enabled through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		/* Legacy shared interrupt (also the MSI-X fallback path) */
		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
7006
7007static void s2io_rem_isr(struct s2io_nic *sp)
7008{
7009        if (sp->config.intr_type == MSI_X)
7010                remove_msix_isr(sp);
7011        else
7012                remove_inta_isr(sp);
7013}
7014
7015static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7016{
7017        int cnt = 0;
7018        struct XENA_dev_config __iomem *bar0 = sp->bar0;
7019        register u64 val64 = 0;
7020        struct config_param *config;
7021        config = &sp->config;
7022
7023        if (!is_s2io_card_up(sp))
7024                return;
7025
7026        del_timer_sync(&sp->alarm_timer);
7027        /* If s2io_set_link task is executing, wait till it completes. */
7028        while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7029                msleep(50);
7030        clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7031
7032        /* Disable napi */
7033        if (sp->config.napi) {
7034                int off = 0;
7035                if (config->intr_type ==  MSI_X) {
7036                        for (; off < sp->config.rx_ring_num; off++)
7037                                napi_disable(&sp->mac_control.rings[off].napi);
7038                }
7039                else
7040                        napi_disable(&sp->napi);
7041        }
7042
7043        /* disable Tx and Rx traffic on the NIC */
7044        if (do_io)
7045                stop_nic(sp);
7046
7047        s2io_rem_isr(sp);
7048
7049        /* stop the tx queue, indicate link down */
7050        s2io_link(sp, LINK_DOWN);
7051
7052        /* Check if the device is Quiescent and then Reset the NIC */
7053        while (do_io) {
7054                /* As per the HW requirement we need to replenish the
7055                 * receive buffer to avoid the ring bump. Since there is
7056                 * no intention of processing the Rx frame at this pointwe are
7057                 * just setting the ownership bit of rxd in Each Rx
7058                 * ring to HW and set the appropriate buffer size
7059                 * based on the ring mode
7060                 */
7061                rxd_owner_bit_reset(sp);
7062
7063                val64 = readq(&bar0->adapter_status);
7064                if (verify_xena_quiescence(sp)) {
7065                        if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7066                                break;
7067                }
7068
7069                msleep(50);
7070                cnt++;
7071                if (cnt == 10) {
7072                        DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7073                                  "adapter status reads 0x%llx\n",
7074                                  (unsigned long long)val64);
7075                        break;
7076                }
7077        }
7078        if (do_io)
7079                s2io_reset(sp);
7080
7081        /* Free all Tx buffers */
7082        free_tx_buffers(sp);
7083
7084        /* Free all Rx buffers */
7085        free_rx_buffers(sp);
7086
7087        clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7088}
7089
/* s2io_card_down - bring the card down with full hardware I/O allowed. */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
7094
/*
 * s2io_card_up - bring the adapter up.
 *
 * Initializes the hardware registers, fills the Rx rings, enables NAPI,
 * restores the receive mode, starts the NIC, registers the ISR(s), arms
 * the alarm timer and enables interrupts.  Each failure path undoes the
 * steps completed so far.  Returns 0 on success or a negative errno.
 */
static int s2io_card_up(struct s2io_nic *sp)
{
	int i, ret = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct net_device *dev = sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means the device is unreachable; skip the reset then */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		ring->mtu = dev->mtu;
		ring->lro = !!(dev->features & NETIF_F_LRO);
		ret = fill_rx_buffers(sp, ring, 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  ring->rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		if (config->intr_type ==  MSI_X) {
			/* One NAPI context per Rx ring under MSI-X */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (dev->features & NETIF_F_LRO) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use (if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* s2io_add_isr may have registered some MSI-X vectors
		 * before failing; release them.
		 */
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Poll for alarms every half second */
	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		/* Rx traffic interrupts come via per-ring MSI-X vectors */
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	return 0;
}
7201
7202/**
7203 * s2io_restart_nic - Resets the NIC.
7204 * @data : long pointer to the device private structure
7205 * Description:
7206 * This function is scheduled to be run by the s2io_tx_watchdog
7207 * function after 0.5 secs to reset the NIC. The idea is to reduce
7208 * the run time of the watch dog routine which is run holding a
7209 * spin lock.
7210 */
7211
7212static void s2io_restart_nic(struct work_struct *work)
7213{
7214        struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7215        struct net_device *dev = sp->dev;
7216
7217        rtnl_lock();
7218
7219        if (!netif_running(dev))
7220                goto out_unlock;
7221
7222        s2io_card_down(sp);
7223        if (s2io_card_up(sp)) {
7224                DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7225        }
7226        s2io_wake_all_tx_queue(sp);
7227        DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7228out_unlock:
7229        rtnl_unlock();
7230}
7231
7232/**
7233 *  s2io_tx_watchdog - Watchdog for transmit side.
7234 *  @dev : Pointer to net device structure
7235 *  Description:
7236 *  This function is triggered if the Tx Queue is stopped
7237 *  for a pre-defined amount of time when the Interface is still up.
7238 *  If the Interface is jammed in such a situation, the hardware is
7239 *  reset (by s2io_close) and restarted again (by s2io_open) to
7240 *  overcome any problem that might have been caused in the hardware.
7241 *  Return value:
7242 *  void
7243 */
7244
7245static void s2io_tx_watchdog(struct net_device *dev)
7246{
7247        struct s2io_nic *sp = netdev_priv(dev);
7248        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7249
7250        if (netif_carrier_ok(dev)) {
7251                swstats->watchdog_timer_cnt++;
7252                schedule_work(&sp->rst_timer_task);
7253                swstats->soft_reset_cnt++;
7254        }
7255}
7256
7257/**
7258 *   rx_osm_handler - To perform some OS related operations on SKB.
7259 *   @sp: private member of the device structure,pointer to s2io_nic structure.
7260 *   @skb : the socket buffer pointer.
7261 *   @len : length of the packet
7262 *   @cksum : FCS checksum of the frame.
7263 *   @ring_no : the ring from which this RxD was extracted.
7264 *   Description:
 *   This function is called by the Rx interrupt service routine to perform
7266 *   some OS related operations on the SKB before passing it to the upper
7267 *   layers. It mainly checks if the checksum is OK, if so adds it to the
7268 *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7269 *   to the upper layer. If the checksum is wrong, it increments the Rx
7270 *   packet error count, frees the SKB and returns error.
7271 *   Return value:
7272 *   SUCCESS on success and -1 on failure.
7273 */
7274static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7275{
7276        struct s2io_nic *sp = ring_data->nic;
7277        struct net_device *dev = ring_data->dev;
7278        struct sk_buff *skb = (struct sk_buff *)
7279                ((unsigned long)rxdp->Host_Control);
7280        int ring_no = ring_data->ring_no;
7281        u16 l3_csum, l4_csum;
7282        unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7283        struct lro *uninitialized_var(lro);
7284        u8 err_mask;
7285        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7286
7287        skb->dev = dev;
7288
7289        if (err) {
7290                /* Check for parity error */
7291                if (err & 0x1)
7292                        swstats->parity_err_cnt++;
7293
7294                err_mask = err >> 48;
7295                switch (err_mask) {
7296                case 1:
7297                        swstats->rx_parity_err_cnt++;
7298                        break;
7299
7300                case 2:
7301                        swstats->rx_abort_cnt++;
7302                        break;
7303
7304                case 3:
7305                        swstats->rx_parity_abort_cnt++;
7306                        break;
7307
7308                case 4:
7309                        swstats->rx_rda_fail_cnt++;
7310                        break;
7311
7312                case 5:
7313                        swstats->rx_unkn_prot_cnt++;
7314                        break;
7315
7316                case 6:
7317                        swstats->rx_fcs_err_cnt++;
7318                        break;
7319
7320                case 7:
7321                        swstats->rx_buf_size_err_cnt++;
7322                        break;
7323
7324                case 8:
7325                        swstats->rx_rxd_corrupt_cnt++;
7326                        break;
7327
7328                case 15:
7329                        swstats->rx_unkn_err_cnt++;
7330                        break;
7331                }
7332                /*
7333                 * Drop the packet if bad transfer code. Exception being
7334                 * 0x5, which could be due to unsupported IPv6 extension header.
7335                 * In this case, we let stack handle the packet.
7336                 * Note that in this case, since checksum will be incorrect,
7337                 * stack will validate the same.
7338                 */
7339                if (err_mask != 0x5) {
7340                        DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7341                                  dev->name, err_mask);
7342                        dev->stats.rx_crc_errors++;
7343                        swstats->mem_freed
7344                                += skb->truesize;
7345                        dev_kfree_skb(skb);
7346                        ring_data->rx_bufs_left -= 1;
7347                        rxdp->Host_Control = 0;
7348                        return 0;
7349                }
7350        }
7351
7352        rxdp->Host_Control = 0;
7353        if (sp->rxd_mode == RXD_MODE_1) {
7354                int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7355
7356                skb_put(skb, len);
7357        } else if (sp->rxd_mode == RXD_MODE_3B) {
7358                int get_block = ring_data->rx_curr_get_info.block_index;
7359                int get_off = ring_data->rx_curr_get_info.offset;
7360                int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7361                int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7362                unsigned char *buff = skb_push(skb, buf0_len);
7363
7364                struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7365                memcpy(buff, ba->ba_0, buf0_len);
7366                skb_put(skb, buf2_len);
7367        }
7368
7369        if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7370            ((!ring_data->lro) ||
7371             (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7372            (dev->features & NETIF_F_RXCSUM)) {
7373                l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7374                l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7375                if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7376                        /*
7377                         * NIC verifies if the Checksum of the received
7378                         * frame is Ok or not and accordingly returns
7379                         * a flag in the RxD.
7380                         */
7381                        skb->ip_summed = CHECKSUM_UNNECESSARY;
7382                        if (ring_data->lro) {
7383                                u32 tcp_len = 0;
7384                                u8 *tcp;
7385                                int ret = 0;
7386
7387                                ret = s2io_club_tcp_session(ring_data,
7388                                                            skb->data, &tcp,
7389                                                            &tcp_len, &lro,
7390                                                            rxdp, sp);
7391                                switch (ret) {
7392                                case 3: /* Begin anew */
7393                                        lro->parent = skb;
7394                                        goto aggregate;
7395                                case 1: /* Aggregate */
7396                                        lro_append_pkt(sp, lro, skb, tcp_len);
7397                                        goto aggregate;
7398                                case 4: /* Flush session */
7399                                        lro_append_pkt(sp, lro, skb, tcp_len);
7400                                        queue_rx_frame(lro->parent,
7401                                                       lro->vlan_tag);
7402                                        clear_lro_session(lro);
7403                                        swstats->flush_max_pkts++;
7404                                        goto aggregate;
7405                                case 2: /* Flush both */
7406                                        lro->parent->data_len = lro->frags_len;
7407                                        swstats->sending_both++;
7408                                        queue_rx_frame(lro->parent,
7409                                                       lro->vlan_tag);
7410                                        clear_lro_session(lro);
7411                                        goto send_up;
7412                                case 0: /* sessions exceeded */
7413                                case -1: /* non-TCP or not L2 aggregatable */
7414                                case 5: /*
7415                                         * First pkt in session not
7416                                         * L3/L4 aggregatable
7417                                         */
7418                                        break;
7419                                default:
7420                                        DBG_PRINT(ERR_DBG,
7421                                                  "%s: Samadhana!!\n",
7422                                                  __func__);
7423                                        BUG();
7424                                }
7425                        }
7426                } else {
7427                        /*
7428                         * Packet with erroneous checksum, let the
7429                         * upper layers deal with it.
7430                         */
7431                        skb_checksum_none_assert(skb);
7432                }
7433        } else
7434                skb_checksum_none_assert(skb);
7435
7436        swstats->mem_freed += skb->truesize;
7437send_up:
7438        skb_record_rx_queue(skb, ring_no);
7439        queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7440aggregate:
7441        sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7442        return SUCCESS;
7443}
7444
7445/**
7446 *  s2io_link - stops/starts the Tx queue.
7447 *  @sp : private member of the device structure, which is a pointer to the
7448 *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
7450 *  Description:
7451 *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
7453 *  interrupt handler whenever a link change interrupt comes up.
7454 *  Return value:
7455 *  void.
7456 */
7457
7458static void s2io_link(struct s2io_nic *sp, int link)
7459{
7460        struct net_device *dev = sp->dev;
7461        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7462
7463        if (link != sp->last_link_state) {
7464                init_tti(sp, link);
7465                if (link == LINK_DOWN) {
7466                        DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7467                        s2io_stop_all_tx_queue(sp);
7468                        netif_carrier_off(dev);
7469                        if (swstats->link_up_cnt)
7470                                swstats->link_up_time =
7471                                        jiffies - sp->start_time;
7472                        swstats->link_down_cnt++;
7473                } else {
7474                        DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7475                        if (swstats->link_down_cnt)
7476                                swstats->link_down_time =
7477                                        jiffies - sp->start_time;
7478                        swstats->link_up_cnt++;
7479                        netif_carrier_on(dev);
7480                        s2io_wake_all_tx_queue(sp);
7481                }
7482        }
7483        sp->last_link_state = link;
7484        sp->start_time = jiffies;
7485}
7486
7487/**
7488 *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7489 *  @sp : private member of the device structure, which is a pointer to the
7490 *  s2io_nic structure.
7491 *  Description:
7492 *  This function initializes a few of the PCI and PCI-X configuration registers
7493 *  with recommended values.
7494 *  Return value:
7495 *  void
7496 */
7497
7498static void s2io_init_pci(struct s2io_nic *sp)
7499{
7500        u16 pci_cmd = 0, pcix_cmd = 0;
7501
7502        /* Enable Data Parity Error Recovery in PCI-X command register. */
7503        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7504                             &(pcix_cmd));
7505        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7506                              (pcix_cmd | 1));
7507        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7508                             &(pcix_cmd));
7509
7510        /* Set the PErr Response bit in PCI command register. */
7511        pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7512        pci_write_config_word(sp->pdev, PCI_COMMAND,
7513                              (pci_cmd | PCI_COMMAND_PARITY));
7514        pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7515}
7516
7517static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7518                            u8 *dev_multiq)
7519{
7520        int i;
7521
7522        if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7523                DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7524                          "(%d) not supported\n", tx_fifo_num);
7525
7526                if (tx_fifo_num < 1)
7527                        tx_fifo_num = 1;
7528                else
7529                        tx_fifo_num = MAX_TX_FIFOS;
7530
7531                DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7532        }
7533
7534        if (multiq)
7535                *dev_multiq = multiq;
7536
7537        if (tx_steering_type && (1 == tx_fifo_num)) {
7538                if (tx_steering_type != TX_DEFAULT_STEERING)
7539                        DBG_PRINT(ERR_DBG,
7540                                  "Tx steering is not supported with "
7541                                  "one fifo. Disabling Tx steering.\n");
7542                tx_steering_type = NO_STEERING;
7543        }
7544
7545        if ((tx_steering_type < NO_STEERING) ||
7546            (tx_steering_type > TX_DEFAULT_STEERING)) {
7547                DBG_PRINT(ERR_DBG,
7548                          "Requested transmit steering not supported\n");
7549                DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7550                tx_steering_type = NO_STEERING;
7551        }
7552
7553        if (rx_ring_num > MAX_RX_RINGS) {
7554                DBG_PRINT(ERR_DBG,
7555                          "Requested number of rx rings not supported\n");
7556                DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7557                          MAX_RX_RINGS);
7558                rx_ring_num = MAX_RX_RINGS;
7559        }
7560
7561        if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7562                DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7563                          "Defaulting to INTA\n");
7564                *dev_intr_type = INTA;
7565        }
7566
7567        if ((*dev_intr_type == MSI_X) &&
7568            ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7569             (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7570                DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7571                          "Defaulting to INTA\n");
7572                *dev_intr_type = INTA;
7573        }
7574
7575        if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7576                DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7577                DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7578                rx_ring_mode = 1;
7579        }
7580
7581        for (i = 0; i < MAX_RX_RINGS; i++)
7582                if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7583                        DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7584                                  "supported\nDefaulting to %d\n",
7585                                  MAX_RX_BLOCKS_PER_RING);
7586                        rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7587                }
7588
7589        return SUCCESS;
7590}
7591
7592/**
7593 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7594 * or Traffic class respectively.
 * @nic: device private variable
 * @ds_codepoint: IPv4 TOS / IPv6 traffic-class value (0-63) to match on
 * @ring: receive ring to which matching frames are steered
7596 * Description: The function configures the receive steering to
7597 * desired receive ring.
7598 * Return Value:  SUCCESS on success and
7599 * '-1' on failure (endian settings incorrect).
7600 */
7601static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7602{
7603        struct XENA_dev_config __iomem *bar0 = nic->bar0;
7604        register u64 val64 = 0;
7605
7606        if (ds_codepoint > 63)
7607                return FAILURE;
7608
7609        val64 = RTS_DS_MEM_DATA(ring);
7610        writeq(val64, &bar0->rts_ds_mem_data);
7611
7612        val64 = RTS_DS_MEM_CTRL_WE |
7613                RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7614                RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7615
7616        writeq(val64, &bar0->rts_ds_mem_ctrl);
7617
7618        return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7619                                     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7620                                     S2IO_BIT_RESET);
7621}
7622
/* Driver entry points registered with the network stack. */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open               = s2io_open,
	.ndo_stop               = s2io_close,
	.ndo_get_stats          = s2io_get_stats,
	.ndo_start_xmit         = s2io_xmit,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_rx_mode        = s2io_set_multicast,
	.ndo_do_ioctl           = s2io_ioctl,
	.ndo_set_mac_address    = s2io_set_mac_addr,
	.ndo_change_mtu         = s2io_change_mtu,
	.ndo_set_features       = s2io_set_features,
	.ndo_tx_timeout         = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = s2io_netpoll,
#endif
};
7639
7640/**
7641 *  s2io_init_nic - Initialization of the adapter .
7642 *  @pdev : structure containing the PCI related information of the device.
7643 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7644 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
7646 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
7648 *  control register is initialized to enable read and write into the I/O
7649 *  registers of the device.
7650 *  Return value:
7651 *  returns 0 on success and negative on failure.
7652 */
7653
static int
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = false;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct config_param *config;
	struct mac_info *mac_control;
	int mode;
	u8 dev_intr_type = intr_type;
	u8 dev_multiq = 0;

	/* Validate (and possibly clamp) the module load parameters. */
	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
	if (ret)
		return ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		DBG_PRINT(ERR_DBG,
			  "%s: pci_enable_device failed\n", __func__);
		return ret;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit, else bail out. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
		dma_flag = true;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA "
				  "for consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	ret = pci_request_regions(pdev, s2io_driver_name);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
			  __func__, ret);
		pci_disable_device(pdev);
		return -ENODEV;
	}
	if (dev_multiq)
		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
	else
		dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/*  Private member variable initialized to s2io NIC structure */
	sp = netdev_priv(dev);
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = false;
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;

	sp->config.intr_type = dev_intr_type;

	/* Herc (Xframe II) is identified by its PCI device ID. */
	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;


	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	config->napi = napi;
	config->tx_steering_type = tx_steering_type;

	/* Tx side parameters. */
	if (config->tx_steering_type == TX_PRIORITY_STEERING)
		config->tx_fifo_num = MAX_TX_FIFOS;
	else
		config->tx_fifo_num = tx_fifo_num;

	/* Initialize the fifos used for tx steering */
	if (config->tx_fifo_num < 5) {
		if (config->tx_fifo_num  == 1)
			sp->total_tcp_fifos = 1;
		else
			sp->total_tcp_fifos = config->tx_fifo_num - 1;
		sp->udp_fifo_idx = config->tx_fifo_num - 1;
		sp->total_udp_fifos = 1;
		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
	} else {
		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
				       FIFO_OTHER_MAX_NUM);
		sp->udp_fifo_idx = sp->total_tcp_fifos;
		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
	}

	config->multiq = dev_multiq;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		tx_cfg->fifo_len = tx_fifo_len[i];
		tx_cfg->fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];

	/* map the hashing selector table to the configured fifos */
	for (i = 0; i < config->tx_fifo_num; i++)
		sp->fifo_selector[i] = fifo_selector[i];


	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		/* Short fifos use per-list Tx interrupts instead. */
		if (tx_cfg->fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
		rx_cfg->ring_priority = i;
		ring->rx_bufs_left = 0;
		ring->rxd_mode = sp->rxd_mode;
		ring->rxd_count = rxd_count[sp->rxd_mode];
		ring->pdev = sp->pdev;
		ring->dev = sp->dev;
	}

	for (i = 0; i < rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		rx_cfg->ring_org = RING_ORG_BUFF1;
		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/*  Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


	/*  initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	sp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = pci_ioremap_bar(pdev, 2);
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
	}

	/*  Driver entry points */
	dev->netdev_ops = &s2io_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO;
	dev->features |= dev->hw_features |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (sp->high_dma_flag == true)
		dev->features |= NETIF_F_HIGHDMA;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
				  __func__);
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	if (sp->config.intr_type == MSI_X) {
		/* One MSI-X vector per rx ring plus one extra. */
		sp->num_entries = config->rx_ring_num + 1;
		ret = s2io_enable_msi_x(sp);

		if (!ret) {
			ret = s2io_test_msi(sp);
			/* rollback MSI-X, will re-enable during add_isr() */
			remove_msix_isr(sp);
		}
		if (ret) {

			DBG_PRINT(ERR_DBG,
				  "MSI-X requested but failed to enable\n");
			sp->config.intr_type = INTA;
		}
	}

	if (config->intr_type ==  MSI_X) {
		for (i = 0; i < config->rx_ring_num ; i++) {
			struct ring_info *ring = &mac_control->rings[i];

			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
		}
	} else {
		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
			      S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32)tmp64;
	mac_up = (u32) (tmp64 >> 32);

	/* Unpack the 48-bit MAC address from the two 32-bit halves. */
	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/*  Set the factory defined MAC address initially   */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

	/* initialize number of multicast & unicast MAC entries variables */
	if (sp->device_type == XFRAME_I_DEVICE) {
		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
	} else if (sp->device_type == XFRAME_II_DEVICE) {
		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
	}

	/* MTU range: 46 - 9600 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = S2IO_JUMBO_SIZE;

	/* store mac addresses from CAM to s2io_nic structure */
	do_s2io_store_unicast_mc(sp);

	/* Configure MSIX vector for number of rings configured plus one */
	if ((sp->device_type == XFRAME_II_DEVICE) &&
	    (config->intr_type == MSI_X))
		sp->num_entries = config->rx_ring_num + 1;

	/* Store the values of the MSIX table in the s2io_nic structure */
	store_xmsi_data(sp);
	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize link state flags
	 * and the card state parameter
	 */
	sp->state = 0;

	/* Initialize spinlocks */
	for (i = 0; i < sp->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_lock_init(&fifo->tx_lock);
	}

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
		  sp->product_name, pdev->revision);
	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch (sp->rxd_mode) {
	case RXD_MODE_1:
		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
			  dev->name);
		break;
	case RXD_MODE_3B:
		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
			  dev->name);
		break;
	}

	switch (sp->config.napi) {
	case 0:
		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
		break;
	case 1:
		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
		break;
	}

	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
		  sp->config.tx_fifo_num);

	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
		  sp->config.rx_ring_num);

	switch (sp->config.intr_type) {
	case INTA:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		break;
	case MSI_X:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		break;
	}
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++) {
			struct fifo_info *fifo = &mac_control->fifos[i];

			fifo->multiq = config->multiq;
		}
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
			  dev->name);
	} else
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
			  dev->name);

	switch (sp->config.tx_steering_type) {
	case NO_STEERING:
		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
			  dev->name);
		break;
	case TX_PRIORITY_STEERING:
		DBG_PRINT(ERR_DBG,
			  "%s: Priority steering enabled for transmit\n",
			  dev->name);
		break;
	case TX_DEFAULT_STEERING:
		DBG_PRINT(ERR_DBG,
			  "%s: Default steering enabled for transmit\n",
			  dev->name);
	}

	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
		  dev->name);
	/* Initialize device name */
	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
		 sp->product_name);

	if (vlan_tag_strip)
		sp->vlan_strip_flag = 1;
	else
		sp->vlan_strip_flag = 0;

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

	/* Error unwind: each label releases what was acquired before it. */
register_failed:
set_swap_failed:
	iounmap(sp->bar1);
bar1_remap_failed:
	iounmap(sp->bar0);
bar0_remap_failed:
mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	free_netdev(dev);

	return ret;
}
8136
8137/**
8138 * s2io_rem_nic - Free the PCI device
8139 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
8142 * be in response to a Hot plug event or when the driver is to be removed
8143 * from memory.
8144 */
8145
static void s2io_rem_nic(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct s2io_nic *sp;

        if (dev == NULL) {
                DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
                return;
        }

        sp = netdev_priv(dev);

        /* Stop deferred work first so the handlers cannot run against a
         * device that is in the middle of being torn down.
         */
        cancel_work_sync(&sp->rst_timer_task);
        cancel_work_sync(&sp->set_link_task);

        unregister_netdev(dev);

        /* Release resources in reverse order of acquisition in the probe
         * path: shared DMA memory, BAR mappings, PCI regions, then the
         * netdev itself before disabling the PCI device.
         */
        free_shared_mem(sp);
        iounmap(sp->bar0);
        iounmap(sp->bar1);
        pci_release_regions(pdev);
        free_netdev(dev);
        pci_disable_device(pdev);
}
8170
/* Register the PCI driver and generate the module init/exit boilerplate. */
module_pci_driver(s2io_driver);
8172
8173static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8174                                struct tcphdr **tcp, struct RxD_t *rxdp,
8175                                struct s2io_nic *sp)
8176{
8177        int ip_off;
8178        u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8179
8180        if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8181                DBG_PRINT(INIT_DBG,
8182                          "%s: Non-TCP frames not supported for LRO\n",
8183                          __func__);
8184                return -1;
8185        }
8186
8187        /* Checking for DIX type or DIX type with VLAN */
8188        if ((l2_type == 0) || (l2_type == 4)) {
8189                ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8190                /*
8191                 * If vlan stripping is disabled and the frame is VLAN tagged,
8192                 * shift the offset by the VLAN header size bytes.
8193                 */
8194                if ((!sp->vlan_strip_flag) &&
8195                    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8196                        ip_off += HEADER_VLAN_SIZE;
8197        } else {
8198                /* LLC, SNAP etc are considered non-mergeable */
8199                return -1;
8200        }
8201
8202        *ip = (struct iphdr *)(buffer + ip_off);
8203        ip_len = (u8)((*ip)->ihl);
8204        ip_len <<= 2;
8205        *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8206
8207        return 0;
8208}
8209
8210static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8211                                  struct tcphdr *tcp)
8212{
8213        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8214        if ((lro->iph->saddr != ip->saddr) ||
8215            (lro->iph->daddr != ip->daddr) ||
8216            (lro->tcph->source != tcp->source) ||
8217            (lro->tcph->dest != tcp->dest))
8218                return -1;
8219        return 0;
8220}
8221
8222static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8223{
8224        return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8225}
8226
8227static void initiate_new_session(struct lro *lro, u8 *l2h,
8228                                 struct iphdr *ip, struct tcphdr *tcp,
8229                                 u32 tcp_pyld_len, u16 vlan_tag)
8230{
8231        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8232        lro->l2h = l2h;
8233        lro->iph = ip;
8234        lro->tcph = tcp;
8235        lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8236        lro->tcp_ack = tcp->ack_seq;
8237        lro->sg_num = 1;
8238        lro->total_len = ntohs(ip->tot_len);
8239        lro->frags_len = 0;
8240        lro->vlan_tag = vlan_tag;
8241        /*
8242         * Check if we saw TCP timestamp.
8243         * Other consistency checks have already been done.
8244         */
8245        if (tcp->doff == 8) {
8246                __be32 *ptr;
8247                ptr = (__be32 *)(tcp+1);
8248                lro->saw_ts = 1;
8249                lro->cur_tsval = ntohl(*(ptr+1));
8250                lro->cur_tsecr = *(ptr+2);
8251        }
8252        lro->in_use = 1;
8253}
8254
/*
 * Finalize the aggregated super-packet's headers before it is handed to
 * the stack: patch the IP total length and checksum, and the TCP
 * ack/window (and tsecr) with the values accumulated in the LRO session.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
        struct iphdr *ip = lro->iph;
        struct tcphdr *tcp = lro->tcph;
        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

        /* Update L3 header */
        /* csum_replace2 must see the old tot_len before it is overwritten */
        csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
        ip->tot_len = htons(lro->total_len);

        /* Update L4 header */
        tcp->ack_seq = lro->tcp_ack;
        tcp->window = lro->window;

        /* Update tsecr field if this session has timestamps enabled */
        if (lro->saw_ts) {
                /* ptr+2 is the tsecr word of the (only) timestamp option */
                __be32 *ptr = (__be32 *)(tcp + 1);
                *(ptr+2) = lro->cur_tsecr;
        }

        /* Update counters required for calculation of
         * average no. of packets aggregated.
         */
        swstats->sum_avg_pkts_aggregated += lro->sg_num;
        swstats->num_aggregations++;
}
8283
8284static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8285                             struct tcphdr *tcp, u32 l4_pyld)
8286{
8287        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8288        lro->total_len += l4_pyld;
8289        lro->frags_len += l4_pyld;
8290        lro->tcp_next_seq += l4_pyld;
8291        lro->sg_num++;
8292
8293        /* Update ack seq no. and window ad(from this pkt) in LRO object */
8294        lro->tcp_ack = tcp->ack_seq;
8295        lro->window = tcp->window;
8296
8297        if (lro->saw_ts) {
8298                __be32 *ptr;
8299                /* Update tsecr and tsval from this packet */
8300                ptr = (__be32 *)(tcp+1);
8301                lro->cur_tsval = ntohl(*(ptr+1));
8302                lro->cur_tsecr = *(ptr + 2);
8303        }
8304}
8305
/*
 * Check whether a packet's L3/L4 headers allow it to be aggregated:
 * non-empty payload, no IP options, no ECN CE mark, only the ACK flag
 * set, and at most a single (timestamp) TCP option whose tsval does not
 * go backwards relative to @l_lro (may be NULL for a new session).
 * Returns 0 if mergeable, -1 otherwise.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
                                    struct tcphdr *tcp, u32 tcp_pyld_len)
{
        u8 *ptr;

        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

        if (!tcp_pyld_len) {
                /* Runt frame or a pure ack */
                return -1;
        }

        if (ip->ihl != 5) /* IP has options */
                return -1;

        /* If we see CE codepoint in IP header, packet is not mergeable */
        if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
                return -1;

        /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
        if (tcp->urg || tcp->psh || tcp->rst ||
            tcp->syn || tcp->fin ||
            tcp->ece || tcp->cwr || !tcp->ack) {
                /*
                 * Currently recognize only the ack control word and
                 * any other control field being set would result in
                 * flushing the LRO session
                 */
                return -1;
        }

        /*
         * Allow only one TCP timestamp option. Don't aggregate if
         * any other options are detected.
         * doff == 5 means no options; doff == 8 leaves room for exactly
         * the 10-byte timestamp option (plus NOP padding).
         */
        if (tcp->doff != 5 && tcp->doff != 8)
                return -1;

        if (tcp->doff == 8) {
                /* Skip leading NOP padding to reach the option kind byte */
                ptr = (u8 *)(tcp + 1);
                while (*ptr == TCPOPT_NOP)
                        ptr++;
                if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
                        return -1;

                /* Ensure timestamp value increases monotonically */
                if (l_lro)
                        if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
                                return -1;

                /* timestamp echo reply should be non-zero */
                if (*((__be32 *)(ptr+6)) == 0)
                        return -1;
        }

        return 0;
}
8363
/*
 * s2io_club_tcp_session - LRO classification entry point for one rx frame.
 *
 * Parses the frame in @buffer, matches it against the ring's LRO session
 * table, and decides how the caller should treat it. Fills in *tcp,
 * *tcp_len and (when a session is involved) *lro.
 *
 * Return values consumed by the rx path:
 *   0 - all sessions in use, no aggregation possible (*lro is NULL)
 *   1 - packet aggregated into an existing session
 *   2 - flush the matched session (out of sequence or not mergeable)
 *   3 - new session begun with this packet
 *   4 - packet aggregated and session reached max size: flush it
 *   5 - packet is not L3/L4 aggregatable; send it up as-is
 *  <0 - frame failed the L2/protocol check (from check_L2_lro_capable)
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
                                 u8 **tcp, u32 *tcp_len, struct lro **lro,
                                 struct RxD_t *rxdp, struct s2io_nic *sp)
{
        struct iphdr *ip;
        struct tcphdr *tcph;
        int ret = 0, i;
        u16 vlan_tag = 0;
        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

        ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
                                   rxdp, sp);
        if (ret)
                return ret;

        DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

        vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
        tcph = (struct tcphdr *)*tcp;
        *tcp_len = get_l4_pyld_length(ip, tcph);
        /* Look for an in-use session with a matching socket 4-tuple */
        for (i = 0; i < MAX_LRO_SESSIONS; i++) {
                struct lro *l_lro = &ring_data->lro0_n[i];
                if (l_lro->in_use) {
                        if (check_for_socket_match(l_lro, ip, tcph))
                                continue;
                        /* Sock pair matched */
                        *lro = l_lro;

                        if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
                                DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
                                          "expected 0x%x, actual 0x%x\n",
                                          __func__,
                                          (*lro)->tcp_next_seq,
                                          ntohl(tcph->seq));

                                swstats->outof_sequence_pkts++;
                                ret = 2;
                                break;
                        }

                        if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
                                                      *tcp_len))
                                ret = 1; /* Aggregate */
                        else
                                ret = 2; /* Flush both */
                        break;
                }
        }

        if (ret == 0) {
                /* Before searching for available LRO objects,
                 * check if the pkt is L3/L4 aggregatable. If not
                 * don't create new LRO session. Just send this
                 * packet up.
                 */
                if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
                        return 5;

                /* Claim the first free session slot for this flow */
                for (i = 0; i < MAX_LRO_SESSIONS; i++) {
                        struct lro *l_lro = &ring_data->lro0_n[i];
                        if (!(l_lro->in_use)) {
                                *lro = l_lro;
                                ret = 3; /* Begin anew */
                                break;
                        }
                }
        }

        if (ret == 0) { /* sessions exceeded */
                DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
                          __func__);
                *lro = NULL;
                return ret;
        }

        /* Act on the classification decided above */
        switch (ret) {
        case 3:
                initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
                                     vlan_tag);
                break;
        case 2:
                update_L3L4_header(sp, *lro);
                break;
        case 1:
                aggregate_new_rx(*lro, ip, tcph, *tcp_len);
                if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
                        update_L3L4_header(sp, *lro);
                        ret = 4; /* Flush the LRO */
                }
                break;
        default:
                DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
                break;
        }

        return ret;
}
8461
8462static void clear_lro_session(struct lro *lro)
8463{
8464        static u16 lro_struct_size = sizeof(struct lro);
8465
8466        memset(lro, 0, lro_struct_size);
8467}
8468
8469static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8470{
8471        struct net_device *dev = skb->dev;
8472        struct s2io_nic *sp = netdev_priv(dev);
8473
8474        skb->protocol = eth_type_trans(skb, dev);
8475        if (vlan_tag && sp->vlan_strip_flag)
8476                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8477        if (sp->config.napi)
8478                netif_receive_skb(skb);
8479        else
8480                netif_rx(skb);
8481}
8482
8483static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8484                           struct sk_buff *skb, u32 tcp_len)
8485{
8486        struct sk_buff *first = lro->parent;
8487        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8488
8489        first->len += tcp_len;
8490        first->data_len = lro->frags_len;
8491        skb_pull(skb, (skb->len - tcp_len));
8492        if (skb_shinfo(first)->frag_list)
8493                lro->last_frag->next = skb;
8494        else
8495                skb_shinfo(first)->frag_list = skb;
8496        first->truesize += skb->truesize;
8497        lro->last_frag = skb;
8498        swstats->clubbed_frms_cnt++;
8499}
8500
8501/**
8502 * s2io_io_error_detected - called when PCI error is detected
8503 * @pdev: Pointer to PCI device
8504 * @state: The current pci connection state
8505 *
8506 * This function is called after a PCI bus error affecting
8507 * this device has been detected.
8508 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct s2io_nic *sp = netdev_priv(netdev);

        /* Detach first so the stack stops issuing I/O to the dead device */
        netif_device_detach(netdev);

        /* Permanent failure: no point attempting a reset */
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        if (netif_running(netdev)) {
                /* Bring down the card, while avoiding PCI I/O */
                do_s2io_card_down(sp, 0);
        }
        pci_disable_device(pdev);

        /* Ask the PCI core to reset the slot; recovery continues in
         * s2io_io_slot_reset() / s2io_io_resume().
         */
        return PCI_ERS_RESULT_NEED_RESET;
}
8528
8529/**
8530 * s2io_io_slot_reset - called after the pci bus has been reset.
8531 * @pdev: Pointer to PCI device
8532 *
8533 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
8535 * followed by fixups by BIOS, and has its config space
8536 * set up identically to what it was at cold boot.
8537 */
8538static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8539{
8540        struct net_device *netdev = pci_get_drvdata(pdev);
8541        struct s2io_nic *sp = netdev_priv(netdev);
8542
8543        if (pci_enable_device(pdev)) {
8544                pr_err("Cannot re-enable PCI device after reset.\n");
8545                return PCI_ERS_RESULT_DISCONNECT;
8546        }
8547
8548        pci_set_master(pdev);
8549        s2io_reset(sp);
8550
8551        return PCI_ERS_RESULT_RECOVERED;
8552}
8553
8554/**
8555 * s2io_io_resume - called when traffic can start flowing again.
8556 * @pdev: Pointer to PCI device
8557 *
8558 * This callback is called when the error recovery driver tells
 * us that it is OK to resume normal operation.
8560 */
static void s2io_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct s2io_nic *sp = netdev_priv(netdev);

        if (netif_running(netdev)) {
                /* Re-initialize the card; abort recovery on failure */
                if (s2io_card_up(sp)) {
                        pr_err("Can't bring device back up after reset.\n");
                        return;
                }

                /* Reprogram the MAC address lost across the reset; tear
                 * the card back down if that fails.
                 */
                if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
                        s2io_card_down(sp);
                        pr_err("Can't restore mac addr after reset.\n");
                        return;
                }
        }

        /* Reattach only after the hardware is fully operational */
        netif_device_attach(netdev);
        netif_tx_wake_all_queues(netdev);
}
8582