/* linux/drivers/net/ethernet/neterion/s2io.c */
   1/************************************************************************
   2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
   3 * Copyright(c) 2002-2010 Exar Corp.
   4 *
   5 * This software may be used and distributed according to the terms of
   6 * the GNU General Public License (GPL), incorporated herein by reference.
   7 * Drivers based on or derived from this code fall under the GPL and must
   8 * retain the authorship, copyright and license notice.  This file is not
   9 * a complete program and may only be used when the entire operating
  10 * system is licensed under the GPL.
  11 * See the file COPYING in this distribution for more information.
  12 *
  13 * Credits:
  14 * Jeff Garzik          : For pointing out the improper error condition
  15 *                        check in the s2io_xmit routine and also some
  16 *                        issues in the Tx watch dog function. Also for
  17 *                        patiently answering all those innumerable
  18 *                        questions regaring the 2.6 porting issues.
  19 * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
  20 *                        macros available only in 2.6 Kernel.
  21 * Francois Romieu      : For pointing out all code part that were
  22 *                        deprecated and also styling related comments.
  23 * Grant Grundler       : For helping me get rid of some Architecture
  24 *                        dependent code.
  25 * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
  26 *
  27 * The module loadable parameters that are supported by the driver and a brief
  28 * explanation of all the variables.
  29 *
  30 * rx_ring_num : This can be used to program the number of receive rings used
  31 * in the driver.
  32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
  33 *     This is also an array of size 8.
  34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
  35 *              values are 1, 2.
  36 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
  37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
  38 * Tx descriptors that can be associated with each corresponding FIFO.
  39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
  40 *     2(MSI_X). Default value is '2(MSI_X)'
  41 * lro_max_pkts: This parameter defines maximum number of packets can be
  42 *     aggregated as a single large packet
  43 * napi: This parameter used to enable/disable NAPI (polling Rx)
  44 *     Possible values '1' for enable and '0' for disable. Default is '1'
  45 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
  46 *                 Possible values '1' for enable , '0' for disable.
  47 *                 Default is '2' - which means disable in promisc mode
  48 *                 and enable in non-promiscuous mode.
  49 * multiq: This parameter used to enable/disable MULTIQUEUE support.
  50 *      Possible values '1' for enable and '0' for disable. Default is '0'
  51 ************************************************************************/
  52
  53#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  54
  55#include <linux/module.h>
  56#include <linux/types.h>
  57#include <linux/errno.h>
  58#include <linux/ioport.h>
  59#include <linux/pci.h>
  60#include <linux/dma-mapping.h>
  61#include <linux/kernel.h>
  62#include <linux/netdevice.h>
  63#include <linux/etherdevice.h>
  64#include <linux/mdio.h>
  65#include <linux/skbuff.h>
  66#include <linux/init.h>
  67#include <linux/delay.h>
  68#include <linux/stddef.h>
  69#include <linux/ioctl.h>
  70#include <linux/timex.h>
  71#include <linux/ethtool.h>
  72#include <linux/workqueue.h>
  73#include <linux/if_vlan.h>
  74#include <linux/ip.h>
  75#include <linux/tcp.h>
  76#include <linux/uaccess.h>
  77#include <linux/io.h>
  78#include <linux/io-64-nonatomic-lo-hi.h>
  79#include <linux/slab.h>
  80#include <linux/prefetch.h>
  81#include <net/tcp.h>
  82#include <net/checksum.h>
  83
  84#include <asm/div64.h>
  85#include <asm/irq.h>
  86
  87/* local include */
  88#include "s2io.h"
  89#include "s2io-regs.h"
  90
  91#define DRV_VERSION "2.0.26.28"
  92
  93/* S2io Driver name & version. */
  94static const char s2io_driver_name[] = "Neterion";
  95static const char s2io_driver_version[] = DRV_VERSION;
  96
/* Per-mode RxD geometry, indexed by the adapter's rxd_mode.
 * rxd_size[]  - size in bytes of one Rx descriptor for that mode.
 * rxd_count[] - number of RxDs carried per Rx block; one extra slot per
 *               block is reserved for the block-link descriptor, so code
 *               elsewhere divides by (rxd_count[mode] + 1).
 */
static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};
  99
 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
 101{
 102        int ret;
 103
 104        ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
 105               (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
 106
 107        return ret;
 108}
 109
 110/*
 111 * Cards with following subsystem_id have a link state indication
 112 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 113 * macro below identifies these cards given the subsystem_id.
 114 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(dev_type == XFRAME_I_DEVICE) ?					\
	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* Link is considered up when neither the remote-fault nor the
 * local-fault bit is set in the adapter-status register value.
 */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
 122
 123static inline int is_s2io_card_up(const struct s2io_nic *sp)
 124{
 125        return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
 126}
 127
 128/* Ethtool related variables and Macros. */
/* Names of the ethtool self-test cases (ethtool -t). */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};

/* Hardware statistics names common to Xframe-I and Xframe-II adapters;
 * order must match the order in which values are copied out for
 * ethtool -S.
 */
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};

/* Additional hardware statistics available only on Xframe-II. */
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};

/* Software (driver-maintained) statistics, appended after the hardware
 * statistics in the ethtool -S output.
 */
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};

/* Element counts of the tables above. */
#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

/* Total statistics counts per adapter family:
 * Xframe-I reports the common + driver stats; Xframe-II additionally
 * reports the enhanced stats.
 */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
 340
 341/* copy mac addr to def_mac_addr array */
 342static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
 343{
 344        sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
 345        sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
 346        sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
 347        sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
 348        sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
 349        sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
 350}
 351
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminator for the register-programming sequences below. */
#define END_SIGN	0x0

/* XAUI DTX configuration sequence for Xframe-II (Herc) adapters;
 * values are written pairwise (set address, then write data) until
 * END_SIGN is reached.
 */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};

/* XAUI DTX configuration sequence for Xframe-I (Xena) adapters. */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
 416
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts: FIFO 0 gets its own default, the
 * remaining FIFOs share a second default.
 */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts, all rings defaulting to SMALL_BLK_CNT. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring receive-frame-length limits; 0 means use the driver default. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
 461
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static const struct pci_device_id s2io_tbl[] = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI error-recovery callbacks; handler functions are defined later in
 * this file.
 */
static const struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

/* PCI driver registration structure. */
static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = s2io_rem_nic,
	.err_handler = &s2io_err_handler,
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
 496
 497/* netqueue manipulation helper functions */
 498static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
 499{
 500        if (!sp->config.multiq) {
 501                int i;
 502
 503                for (i = 0; i < sp->config.tx_fifo_num; i++)
 504                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
 505        }
 506        netif_tx_stop_all_queues(sp->dev);
 507}
 508
 509static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
 510{
 511        if (!sp->config.multiq)
 512                sp->mac_control.fifos[fifo_no].queue_state =
 513                        FIFO_QUEUE_STOP;
 514
 515        netif_tx_stop_all_queues(sp->dev);
 516}
 517
 518static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
 519{
 520        if (!sp->config.multiq) {
 521                int i;
 522
 523                for (i = 0; i < sp->config.tx_fifo_num; i++)
 524                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 525        }
 526        netif_tx_start_all_queues(sp->dev);
 527}
 528
 529static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
 530{
 531        if (!sp->config.multiq) {
 532                int i;
 533
 534                for (i = 0; i < sp->config.tx_fifo_num; i++)
 535                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 536        }
 537        netif_tx_wake_all_queues(sp->dev);
 538}
 539
 540static inline void s2io_wake_tx_queue(
 541        struct fifo_info *fifo, int cnt, u8 multiq)
 542{
 543
 544        if (multiq) {
 545                if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
 546                        netif_wake_subqueue(fifo->dev, fifo->fifo_no);
 547        } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
 548                if (netif_queue_stopped(fifo->dev)) {
 549                        fifo->queue_state = FIFO_QUEUE_START;
 550                        netif_wake_queue(fifo->dev);
 551                }
 552        }
 553}
 554
 555/**
 556 * init_shared_mem - Allocation and Initialization of Memory
 557 * @nic: Device private variable.
 558 * Description: The function allocates all the memory areas shared
 559 * between the NIC and the driver. This includes Tx descriptors,
 560 * Rx descriptors and the statistics block.
 561 */
 562
 563static int init_shared_mem(struct s2io_nic *nic)
 564{
 565        u32 size;
 566        void *tmp_v_addr, *tmp_v_addr_next;
 567        dma_addr_t tmp_p_addr, tmp_p_addr_next;
 568        struct RxD_block *pre_rxd_blk = NULL;
 569        int i, j, blk_cnt;
 570        int lst_size, lst_per_page;
 571        struct net_device *dev = nic->dev;
 572        unsigned long tmp;
 573        struct buffAdd *ba;
 574        struct config_param *config = &nic->config;
 575        struct mac_info *mac_control = &nic->mac_control;
 576        unsigned long long mem_allocated = 0;
 577
 578        /* Allocation and initialization of TXDLs in FIFOs */
 579        size = 0;
 580        for (i = 0; i < config->tx_fifo_num; i++) {
 581                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 582
 583                size += tx_cfg->fifo_len;
 584        }
 585        if (size > MAX_AVAILABLE_TXDS) {
 586                DBG_PRINT(ERR_DBG,
 587                          "Too many TxDs requested: %d, max supported: %d\n",
 588                          size, MAX_AVAILABLE_TXDS);
 589                return -EINVAL;
 590        }
 591
 592        size = 0;
 593        for (i = 0; i < config->tx_fifo_num; i++) {
 594                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 595
 596                size = tx_cfg->fifo_len;
 597                /*
 598                 * Legal values are from 2 to 8192
 599                 */
 600                if (size < 2) {
 601                        DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
 602                                  "Valid lengths are 2 through 8192\n",
 603                                  i, size);
 604                        return -EINVAL;
 605                }
 606        }
 607
 608        lst_size = (sizeof(struct TxD) * config->max_txds);
 609        lst_per_page = PAGE_SIZE / lst_size;
 610
 611        for (i = 0; i < config->tx_fifo_num; i++) {
 612                struct fifo_info *fifo = &mac_control->fifos[i];
 613                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 614                int fifo_len = tx_cfg->fifo_len;
 615                int list_holder_size = fifo_len * sizeof(struct list_info_hold);
 616
 617                fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
 618                if (!fifo->list_info) {
 619                        DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
 620                        return -ENOMEM;
 621                }
 622                mem_allocated += list_holder_size;
 623        }
 624        for (i = 0; i < config->tx_fifo_num; i++) {
 625                int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
 626                                                lst_per_page);
 627                struct fifo_info *fifo = &mac_control->fifos[i];
 628                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 629
 630                fifo->tx_curr_put_info.offset = 0;
 631                fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
 632                fifo->tx_curr_get_info.offset = 0;
 633                fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
 634                fifo->fifo_no = i;
 635                fifo->nic = nic;
 636                fifo->max_txds = MAX_SKB_FRAGS + 2;
 637                fifo->dev = dev;
 638
 639                for (j = 0; j < page_num; j++) {
 640                        int k = 0;
 641                        dma_addr_t tmp_p;
 642                        void *tmp_v;
 643                        tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
 644                                                   &tmp_p, GFP_KERNEL);
 645                        if (!tmp_v) {
 646                                DBG_PRINT(INFO_DBG,
 647                                          "dma_alloc_coherent failed for TxDL\n");
 648                                return -ENOMEM;
 649                        }
 650                        /* If we got a zero DMA address(can happen on
 651                         * certain platforms like PPC), reallocate.
 652                         * Store virtual address of page we don't want,
 653                         * to be freed later.
 654                         */
 655                        if (!tmp_p) {
 656                                mac_control->zerodma_virt_addr = tmp_v;
 657                                DBG_PRINT(INIT_DBG,
 658                                          "%s: Zero DMA address for TxDL. "
 659                                          "Virtual address %p\n",
 660                                          dev->name, tmp_v);
 661                                tmp_v = dma_alloc_coherent(&nic->pdev->dev,
 662                                                           PAGE_SIZE, &tmp_p,
 663                                                           GFP_KERNEL);
 664                                if (!tmp_v) {
 665                                        DBG_PRINT(INFO_DBG,
 666                                                  "dma_alloc_coherent failed for TxDL\n");
 667                                        return -ENOMEM;
 668                                }
 669                                mem_allocated += PAGE_SIZE;
 670                        }
 671                        while (k < lst_per_page) {
 672                                int l = (j * lst_per_page) + k;
 673                                if (l == tx_cfg->fifo_len)
 674                                        break;
 675                                fifo->list_info[l].list_virt_addr =
 676                                        tmp_v + (k * lst_size);
 677                                fifo->list_info[l].list_phy_addr =
 678                                        tmp_p + (k * lst_size);
 679                                k++;
 680                        }
 681                }
 682        }
 683
 684        for (i = 0; i < config->tx_fifo_num; i++) {
 685                struct fifo_info *fifo = &mac_control->fifos[i];
 686                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 687
 688                size = tx_cfg->fifo_len;
 689                fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
 690                if (!fifo->ufo_in_band_v)
 691                        return -ENOMEM;
 692                mem_allocated += (size * sizeof(u64));
 693        }
 694
 695        /* Allocation and initialization of RXDs in Rings */
 696        size = 0;
 697        for (i = 0; i < config->rx_ring_num; i++) {
 698                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 699                struct ring_info *ring = &mac_control->rings[i];
 700
 701                if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
 702                        DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
 703                                  "multiple of RxDs per Block\n",
 704                                  dev->name, i);
 705                        return FAILURE;
 706                }
 707                size += rx_cfg->num_rxd;
 708                ring->block_count = rx_cfg->num_rxd /
 709                        (rxd_count[nic->rxd_mode] + 1);
 710                ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
 711        }
 712        if (nic->rxd_mode == RXD_MODE_1)
 713                size = (size * (sizeof(struct RxD1)));
 714        else
 715                size = (size * (sizeof(struct RxD3)));
 716
 717        for (i = 0; i < config->rx_ring_num; i++) {
 718                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 719                struct ring_info *ring = &mac_control->rings[i];
 720
 721                ring->rx_curr_get_info.block_index = 0;
 722                ring->rx_curr_get_info.offset = 0;
 723                ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
 724                ring->rx_curr_put_info.block_index = 0;
 725                ring->rx_curr_put_info.offset = 0;
 726                ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
 727                ring->nic = nic;
 728                ring->ring_no = i;
 729
 730                blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
 731                /*  Allocating all the Rx blocks */
 732                for (j = 0; j < blk_cnt; j++) {
 733                        struct rx_block_info *rx_blocks;
 734                        int l;
 735
 736                        rx_blocks = &ring->rx_blocks[j];
 737                        size = SIZE_OF_BLOCK;   /* size is always page size */
 738                        tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
 739                                                        &tmp_p_addr, GFP_KERNEL);
 740                        if (tmp_v_addr == NULL) {
 741                                /*
 742                                 * In case of failure, free_shared_mem()
 743                                 * is called, which should free any
 744                                 * memory that was alloced till the
 745                                 * failure happened.
 746                                 */
 747                                rx_blocks->block_virt_addr = tmp_v_addr;
 748                                return -ENOMEM;
 749                        }
 750                        mem_allocated += size;
 751
 752                        size = sizeof(struct rxd_info) *
 753                                rxd_count[nic->rxd_mode];
 754                        rx_blocks->block_virt_addr = tmp_v_addr;
 755                        rx_blocks->block_dma_addr = tmp_p_addr;
 756                        rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
 757                        if (!rx_blocks->rxds)
 758                                return -ENOMEM;
 759                        mem_allocated += size;
 760                        for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
 761                                rx_blocks->rxds[l].virt_addr =
 762                                        rx_blocks->block_virt_addr +
 763                                        (rxd_size[nic->rxd_mode] * l);
 764                                rx_blocks->rxds[l].dma_addr =
 765                                        rx_blocks->block_dma_addr +
 766                                        (rxd_size[nic->rxd_mode] * l);
 767                        }
 768                }
 769                /* Interlinking all Rx Blocks */
 770                for (j = 0; j < blk_cnt; j++) {
 771                        int next = (j + 1) % blk_cnt;
 772                        tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
 773                        tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
 774                        tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
 775                        tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
 776
 777                        pre_rxd_blk = tmp_v_addr;
 778                        pre_rxd_blk->reserved_2_pNext_RxD_block =
 779                                (unsigned long)tmp_v_addr_next;
 780                        pre_rxd_blk->pNext_RxD_Blk_physical =
 781                                (u64)tmp_p_addr_next;
 782                }
 783        }
 784        if (nic->rxd_mode == RXD_MODE_3B) {
 785                /*
 786                 * Allocation of Storages for buffer addresses in 2BUFF mode
 787                 * and the buffers as well.
 788                 */
 789                for (i = 0; i < config->rx_ring_num; i++) {
 790                        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 791                        struct ring_info *ring = &mac_control->rings[i];
 792
 793                        blk_cnt = rx_cfg->num_rxd /
 794                                (rxd_count[nic->rxd_mode] + 1);
 795                        size = sizeof(struct buffAdd *) * blk_cnt;
 796                        ring->ba = kmalloc(size, GFP_KERNEL);
 797                        if (!ring->ba)
 798                                return -ENOMEM;
 799                        mem_allocated += size;
 800                        for (j = 0; j < blk_cnt; j++) {
 801                                int k = 0;
 802
 803                                size = sizeof(struct buffAdd) *
 804                                        (rxd_count[nic->rxd_mode] + 1);
 805                                ring->ba[j] = kmalloc(size, GFP_KERNEL);
 806                                if (!ring->ba[j])
 807                                        return -ENOMEM;
 808                                mem_allocated += size;
 809                                while (k != rxd_count[nic->rxd_mode]) {
 810                                        ba = &ring->ba[j][k];
 811                                        size = BUF0_LEN + ALIGN_SIZE;
 812                                        ba->ba_0_org = kmalloc(size, GFP_KERNEL);
 813                                        if (!ba->ba_0_org)
 814                                                return -ENOMEM;
 815                                        mem_allocated += size;
 816                                        tmp = (unsigned long)ba->ba_0_org;
 817                                        tmp += ALIGN_SIZE;
 818                                        tmp &= ~((unsigned long)ALIGN_SIZE);
 819                                        ba->ba_0 = (void *)tmp;
 820
 821                                        size = BUF1_LEN + ALIGN_SIZE;
 822                                        ba->ba_1_org = kmalloc(size, GFP_KERNEL);
 823                                        if (!ba->ba_1_org)
 824                                                return -ENOMEM;
 825                                        mem_allocated += size;
 826                                        tmp = (unsigned long)ba->ba_1_org;
 827                                        tmp += ALIGN_SIZE;
 828                                        tmp &= ~((unsigned long)ALIGN_SIZE);
 829                                        ba->ba_1 = (void *)tmp;
 830                                        k++;
 831                                }
 832                        }
 833                }
 834        }
 835
 836        /* Allocation and initialization of Statistics block */
 837        size = sizeof(struct stat_block);
 838        mac_control->stats_mem =
 839                dma_alloc_coherent(&nic->pdev->dev, size,
 840                                   &mac_control->stats_mem_phy, GFP_KERNEL);
 841
 842        if (!mac_control->stats_mem) {
 843                /*
 844                 * In case of failure, free_shared_mem() is called, which
 845                 * should free any memory that was alloced till the
 846                 * failure happened.
 847                 */
 848                return -ENOMEM;
 849        }
 850        mem_allocated += size;
 851        mac_control->stats_mem_sz = size;
 852
 853        tmp_v_addr = mac_control->stats_mem;
 854        mac_control->stats_info = tmp_v_addr;
 855        memset(tmp_v_addr, 0, size);
 856        DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
 857                dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
 858        mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
 859        return SUCCESS;
 860}
 861
 862/**
 863 * free_shared_mem - Free the allocated Memory
 864 * @nic:  Device private variable.
 865 * Description: This function is to free all memory locations allocated by
 866 * the init_shared_mem() function and return it to the kernel.
 867 */
 868
static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	/*
	 * NOTE(review): stats_info points into the DMA stats block allocated
	 * at the very end of init_shared_mem().  If that allocation never
	 * happened (early -ENOMEM), stats is NULL and every
	 * swstats->mem_freed update below dereferences NULL — confirm the
	 * error paths that reach here.
	 */
	swstats = &stats->sw_stat;

	/* One TxD list occupies lst_size bytes; lst_per_page lists fit in a
	 * PAGE_SIZE coherent page, mirroring the allocation layout. */
	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Release the per-fifo TxD list pages and the list_info bookkeeping. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			/* Only every lst_per_page-th list_info entry owns a
			 * whole coherent page; free at those strides. */
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			/*
			 * NOTE(review): returning (not break/continue) here
			 * abandons the rx-block, buffer and stats teardown
			 * below; presumably safe only when nothing after the
			 * list_info allocation succeeded — verify.
			 */
			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			/* A NULL virt addr marks the first never-allocated
			 * page; nothing beyond it was allocated either. */
			if (!fli->list_virt_addr)
				break;
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  fli->list_virt_addr,
					  fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  mac_control->zerodma_virt_addr,
					  (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	/* Release the Rx block coherent pages and their rxd index arrays. */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			/* First NULL block means allocation stopped here. */
			if (tmp_v_addr == NULL)
				break;
			dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
					  tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			/*
			 * NOTE(review): ring->ba itself is not NULL-checked
			 * before indexing; if init_shared_mem() failed before
			 * allocating ba for this ring, ring->ba[j] below
			 * dereferences NULL — confirm against the alloc path.
			 */
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* Free the unaligned originals; the
					 * aligned ba_0/ba_1 pointers alias
					 * into these buffers. */
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	/* Release the per-fifo UFO in-band arrays, if they were allocated. */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	/* Finally release the statistics block itself (freed last because
	 * swstats above lives inside it). */
	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
				  mac_control->stats_mem,
				  mac_control->stats_mem_phy);
	}
}
1002
/*
 * s2io_verify_pci_mode - read the adapter's PCI mode register and return
 * the negotiated PCI/PCI-X operating mode, or -1 if the mode is unknown.
 */
1006
1007static int s2io_verify_pci_mode(struct s2io_nic *nic)
1008{
1009        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1010        register u64 val64 = 0;
1011        int     mode;
1012
1013        val64 = readq(&bar0->pci_mode);
1014        mode = (u8)GET_PCI_MODE(val64);
1015
1016        if (val64 & PCI_MODE_UNKNOWN_MODE)
1017                return -1;      /* Unknown PCI mode */
1018        return mode;
1019}
1020
1021#define NEC_VENID   0x1033
1022#define NEC_DEVID   0x0125
1023static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1024{
1025        struct pci_dev *tdev = NULL;
1026        for_each_pci_dev(tdev) {
1027                if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1028                        if (tdev->bus == s2io_pdev->bus->parent) {
1029                                pci_dev_put(tdev);
1030                                return 1;
1031                        }
1032                }
1033        }
1034        return 0;
1035}
1036
/* Effective bus clock in MHz indexed by GET_PCI_MODE() value; consumed
 * via config->bus_speed (e.g. to scale the TTI timer in init_tti()).
 * NOTE(review): entries for the PCI-X M2 modes hold the doubled effective
 * rate (66->133, 100->200, 133->266) — presumably intentional, matching
 * the strings in s2io_print_pci_mode(); confirm against the datasheet. */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/*
 * s2io_print_pci_mode - log the bus width and PCI/PCI-X mode the adapter
 * is running on and cache the bus speed; returns the mode, or -1 if it
 * is unknown or unsupported.
 */
1041static int s2io_print_pci_mode(struct s2io_nic *nic)
1042{
1043        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1044        register u64 val64 = 0;
1045        int     mode;
1046        struct config_param *config = &nic->config;
1047        const char *pcimode;
1048
1049        val64 = readq(&bar0->pci_mode);
1050        mode = (u8)GET_PCI_MODE(val64);
1051
1052        if (val64 & PCI_MODE_UNKNOWN_MODE)
1053                return -1;      /* Unknown PCI mode */
1054
1055        config->bus_speed = bus_speed[mode];
1056
1057        if (s2io_on_nec_bridge(nic->pdev)) {
1058                DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1059                          nic->dev->name);
1060                return mode;
1061        }
1062
1063        switch (mode) {
1064        case PCI_MODE_PCI_33:
1065                pcimode = "33MHz PCI bus";
1066                break;
1067        case PCI_MODE_PCI_66:
1068                pcimode = "66MHz PCI bus";
1069                break;
1070        case PCI_MODE_PCIX_M1_66:
1071                pcimode = "66MHz PCIX(M1) bus";
1072                break;
1073        case PCI_MODE_PCIX_M1_100:
1074                pcimode = "100MHz PCIX(M1) bus";
1075                break;
1076        case PCI_MODE_PCIX_M1_133:
1077                pcimode = "133MHz PCIX(M1) bus";
1078                break;
1079        case PCI_MODE_PCIX_M2_66:
1080                pcimode = "133MHz PCIX(M2) bus";
1081                break;
1082        case PCI_MODE_PCIX_M2_100:
1083                pcimode = "200MHz PCIX(M2) bus";
1084                break;
1085        case PCI_MODE_PCIX_M2_133:
1086                pcimode = "266MHz PCIX(M2) bus";
1087                break;
1088        default:
1089                pcimode = "unsupported bus!";
1090                mode = -1;
1091        }
1092
1093        DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1094                  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1095
1096        return mode;
1097}
1098
1099/**
1100 *  init_tti - Initialization transmit traffic interrupt scheme
1101 *  @nic: device private variable
1102 *  @link: link status (UP/DOWN) used to enable/disable continuous
1103 *  transmit interrupts
1104 *  @may_sleep: parameter indicates if sleeping when waiting for
1105 *  command complete
1106 *  Description: The function configures transmit traffic interrupts
1107 *  Return Value:  SUCCESS on success and
1108 *  '-1' on failure
1109 */
1110
static int init_tti(struct s2io_nic *nic, int link, bool may_sleep)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	/* Program one TTI memory entry per configured Tx fifo: fill the
	 * data1/data2 staging registers, then strobe the command register
	 * and poll for completion before moving to the next entry. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Xframe II: scale the timer with the detected bus
			 * speed so the interrupt rate stays roughly constant
			 * across PCI modes. */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		/* Utilization-range thresholds A/B/C plus automatic timer
		 * restart (magic values from the reference setup). */
		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on fifo 0, only when the link
		 * is up and the module parameter allows it. */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* INTA with default steering: the fifos reserved for
			 * UDP traffic get their own utilization frame counts;
			 * everything else uses the generic values. */
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit the staged values into TTI entry i. */
		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		/* Wait for the strobe bit to clear; may_sleep selects
		 * sleeping vs. busy polling in the helper. */
		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET, may_sleep) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1177
1178/**
1179 *  init_nic - Initialization of hardware
1180 *  @nic: device private variable
1181 *  Description: The function sequentially configures every block
1182 *  of the H/W from their reset values.
1183 *  Return Value:  SUCCESS on success and
1184 *  '-1' on failure (endian settings incorrect).
1185 */
1186
1187static int init_nic(struct s2io_nic *nic)
1188{
1189        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1190        struct net_device *dev = nic->dev;
1191        register u64 val64 = 0;
1192        void __iomem *add;
1193        u32 time;
1194        int i, j;
1195        int dtx_cnt = 0;
1196        unsigned long long mem_share;
1197        int mem_size;
1198        struct config_param *config = &nic->config;
1199        struct mac_info *mac_control = &nic->mac_control;
1200
1201        /* to set the swapper controle on the card */
1202        if (s2io_set_swapper(nic)) {
1203                DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1204                return -EIO;
1205        }
1206
1207        /*
1208         * Herc requires EOI to be removed from reset before XGXS, so..
1209         */
1210        if (nic->device_type & XFRAME_II_DEVICE) {
1211                val64 = 0xA500000000ULL;
1212                writeq(val64, &bar0->sw_reset);
1213                msleep(500);
1214                val64 = readq(&bar0->sw_reset);
1215        }
1216
1217        /* Remove XGXS from reset state */
1218        val64 = 0;
1219        writeq(val64, &bar0->sw_reset);
1220        msleep(500);
1221        val64 = readq(&bar0->sw_reset);
1222
1223        /* Ensure that it's safe to access registers by checking
1224         * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1225         */
1226        if (nic->device_type == XFRAME_II_DEVICE) {
1227                for (i = 0; i < 50; i++) {
1228                        val64 = readq(&bar0->adapter_status);
1229                        if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1230                                break;
1231                        msleep(10);
1232                }
1233                if (i == 50)
1234                        return -ENODEV;
1235        }
1236
1237        /*  Enable Receiving broadcasts */
1238        add = &bar0->mac_cfg;
1239        val64 = readq(&bar0->mac_cfg);
1240        val64 |= MAC_RMAC_BCAST_ENABLE;
1241        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1242        writel((u32)val64, add);
1243        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1244        writel((u32) (val64 >> 32), (add + 4));
1245
1246        /* Read registers in all blocks */
1247        val64 = readq(&bar0->mac_int_mask);
1248        val64 = readq(&bar0->mc_int_mask);
1249        val64 = readq(&bar0->xgxs_int_mask);
1250
1251        /*  Set MTU */
1252        val64 = dev->mtu;
1253        writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1254
1255        if (nic->device_type & XFRAME_II_DEVICE) {
1256                while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1257                        SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1258                                          &bar0->dtx_control, UF);
1259                        if (dtx_cnt & 0x1)
1260                                msleep(1); /* Necessary!! */
1261                        dtx_cnt++;
1262                }
1263        } else {
1264                while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1265                        SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1266                                          &bar0->dtx_control, UF);
1267                        val64 = readq(&bar0->dtx_control);
1268                        dtx_cnt++;
1269                }
1270        }
1271
1272        /*  Tx DMA Initialization */
1273        val64 = 0;
1274        writeq(val64, &bar0->tx_fifo_partition_0);
1275        writeq(val64, &bar0->tx_fifo_partition_1);
1276        writeq(val64, &bar0->tx_fifo_partition_2);
1277        writeq(val64, &bar0->tx_fifo_partition_3);
1278
1279        for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1280                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1281
1282                val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1283                        vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1284
1285                if (i == (config->tx_fifo_num - 1)) {
1286                        if (i % 2 == 0)
1287                                i++;
1288                }
1289
1290                switch (i) {
1291                case 1:
1292                        writeq(val64, &bar0->tx_fifo_partition_0);
1293                        val64 = 0;
1294                        j = 0;
1295                        break;
1296                case 3:
1297                        writeq(val64, &bar0->tx_fifo_partition_1);
1298                        val64 = 0;
1299                        j = 0;
1300                        break;
1301                case 5:
1302                        writeq(val64, &bar0->tx_fifo_partition_2);
1303                        val64 = 0;
1304                        j = 0;
1305                        break;
1306                case 7:
1307                        writeq(val64, &bar0->tx_fifo_partition_3);
1308                        val64 = 0;
1309                        j = 0;
1310                        break;
1311                default:
1312                        j++;
1313                        break;
1314                }
1315        }
1316
1317        /*
1318         * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1319         * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1320         */
1321        if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1322                writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1323
1324        val64 = readq(&bar0->tx_fifo_partition_0);
1325        DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1326                  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1327
1328        /*
1329         * Initialization of Tx_PA_CONFIG register to ignore packet
1330         * integrity checking.
1331         */
1332        val64 = readq(&bar0->tx_pa_cfg);
1333        val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1334                TX_PA_CFG_IGNORE_SNAP_OUI |
1335                TX_PA_CFG_IGNORE_LLC_CTRL |
1336                TX_PA_CFG_IGNORE_L2_ERR;
1337        writeq(val64, &bar0->tx_pa_cfg);
1338
1339        /* Rx DMA initialization. */
1340        val64 = 0;
1341        for (i = 0; i < config->rx_ring_num; i++) {
1342                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1343
1344                val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1345        }
1346        writeq(val64, &bar0->rx_queue_priority);
1347
1348        /*
1349         * Allocating equal share of memory to all the
1350         * configured Rings.
1351         */
1352        val64 = 0;
1353        if (nic->device_type & XFRAME_II_DEVICE)
1354                mem_size = 32;
1355        else
1356                mem_size = 64;
1357
1358        for (i = 0; i < config->rx_ring_num; i++) {
1359                switch (i) {
1360                case 0:
1361                        mem_share = (mem_size / config->rx_ring_num +
1362                                     mem_size % config->rx_ring_num);
1363                        val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1364                        continue;
1365                case 1:
1366                        mem_share = (mem_size / config->rx_ring_num);
1367                        val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1368                        continue;
1369                case 2:
1370                        mem_share = (mem_size / config->rx_ring_num);
1371                        val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1372                        continue;
1373                case 3:
1374                        mem_share = (mem_size / config->rx_ring_num);
1375                        val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1376                        continue;
1377                case 4:
1378                        mem_share = (mem_size / config->rx_ring_num);
1379                        val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1380                        continue;
1381                case 5:
1382                        mem_share = (mem_size / config->rx_ring_num);
1383                        val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1384                        continue;
1385                case 6:
1386                        mem_share = (mem_size / config->rx_ring_num);
1387                        val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1388                        continue;
1389                case 7:
1390                        mem_share = (mem_size / config->rx_ring_num);
1391                        val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1392                        continue;
1393                }
1394        }
1395        writeq(val64, &bar0->rx_queue_cfg);
1396
1397        /*
1398         * Filling Tx round robin registers
1399         * as per the number of FIFOs for equal scheduling priority
1400         */
1401        switch (config->tx_fifo_num) {
1402        case 1:
1403                val64 = 0x0;
1404                writeq(val64, &bar0->tx_w_round_robin_0);
1405                writeq(val64, &bar0->tx_w_round_robin_1);
1406                writeq(val64, &bar0->tx_w_round_robin_2);
1407                writeq(val64, &bar0->tx_w_round_robin_3);
1408                writeq(val64, &bar0->tx_w_round_robin_4);
1409                break;
1410        case 2:
1411                val64 = 0x0001000100010001ULL;
1412                writeq(val64, &bar0->tx_w_round_robin_0);
1413                writeq(val64, &bar0->tx_w_round_robin_1);
1414                writeq(val64, &bar0->tx_w_round_robin_2);
1415                writeq(val64, &bar0->tx_w_round_robin_3);
1416                val64 = 0x0001000100000000ULL;
1417                writeq(val64, &bar0->tx_w_round_robin_4);
1418                break;
1419        case 3:
1420                val64 = 0x0001020001020001ULL;
1421                writeq(val64, &bar0->tx_w_round_robin_0);
1422                val64 = 0x0200010200010200ULL;
1423                writeq(val64, &bar0->tx_w_round_robin_1);
1424                val64 = 0x0102000102000102ULL;
1425                writeq(val64, &bar0->tx_w_round_robin_2);
1426                val64 = 0x0001020001020001ULL;
1427                writeq(val64, &bar0->tx_w_round_robin_3);
1428                val64 = 0x0200010200000000ULL;
1429                writeq(val64, &bar0->tx_w_round_robin_4);
1430                break;
1431        case 4:
1432                val64 = 0x0001020300010203ULL;
1433                writeq(val64, &bar0->tx_w_round_robin_0);
1434                writeq(val64, &bar0->tx_w_round_robin_1);
1435                writeq(val64, &bar0->tx_w_round_robin_2);
1436                writeq(val64, &bar0->tx_w_round_robin_3);
1437                val64 = 0x0001020300000000ULL;
1438                writeq(val64, &bar0->tx_w_round_robin_4);
1439                break;
1440        case 5:
1441                val64 = 0x0001020304000102ULL;
1442                writeq(val64, &bar0->tx_w_round_robin_0);
1443                val64 = 0x0304000102030400ULL;
1444                writeq(val64, &bar0->tx_w_round_robin_1);
1445                val64 = 0x0102030400010203ULL;
1446                writeq(val64, &bar0->tx_w_round_robin_2);
1447                val64 = 0x0400010203040001ULL;
1448                writeq(val64, &bar0->tx_w_round_robin_3);
1449                val64 = 0x0203040000000000ULL;
1450                writeq(val64, &bar0->tx_w_round_robin_4);
1451                break;
1452        case 6:
1453                val64 = 0x0001020304050001ULL;
1454                writeq(val64, &bar0->tx_w_round_robin_0);
1455                val64 = 0x0203040500010203ULL;
1456                writeq(val64, &bar0->tx_w_round_robin_1);
1457                val64 = 0x0405000102030405ULL;
1458                writeq(val64, &bar0->tx_w_round_robin_2);
1459                val64 = 0x0001020304050001ULL;
1460                writeq(val64, &bar0->tx_w_round_robin_3);
1461                val64 = 0x0203040500000000ULL;
1462                writeq(val64, &bar0->tx_w_round_robin_4);
1463                break;
1464        case 7:
1465                val64 = 0x0001020304050600ULL;
1466                writeq(val64, &bar0->tx_w_round_robin_0);
1467                val64 = 0x0102030405060001ULL;
1468                writeq(val64, &bar0->tx_w_round_robin_1);
1469                val64 = 0x0203040506000102ULL;
1470                writeq(val64, &bar0->tx_w_round_robin_2);
1471                val64 = 0x0304050600010203ULL;
1472                writeq(val64, &bar0->tx_w_round_robin_3);
1473                val64 = 0x0405060000000000ULL;
1474                writeq(val64, &bar0->tx_w_round_robin_4);
1475                break;
1476        case 8:
1477                val64 = 0x0001020304050607ULL;
1478                writeq(val64, &bar0->tx_w_round_robin_0);
1479                writeq(val64, &bar0->tx_w_round_robin_1);
1480                writeq(val64, &bar0->tx_w_round_robin_2);
1481                writeq(val64, &bar0->tx_w_round_robin_3);
1482                val64 = 0x0001020300000000ULL;
1483                writeq(val64, &bar0->tx_w_round_robin_4);
1484                break;
1485        }
1486
1487        /* Enable all configured Tx FIFO partitions */
1488        val64 = readq(&bar0->tx_fifo_partition_0);
1489        val64 |= (TX_FIFO_PARTITION_EN);
1490        writeq(val64, &bar0->tx_fifo_partition_0);
1491
1492        /* Filling the Rx round robin registers as per the
1493         * number of Rings and steering based on QoS with
1494         * equal priority.
1495         */
1496        switch (config->rx_ring_num) {
1497        case 1:
1498                val64 = 0x0;
1499                writeq(val64, &bar0->rx_w_round_robin_0);
1500                writeq(val64, &bar0->rx_w_round_robin_1);
1501                writeq(val64, &bar0->rx_w_round_robin_2);
1502                writeq(val64, &bar0->rx_w_round_robin_3);
1503                writeq(val64, &bar0->rx_w_round_robin_4);
1504
1505                val64 = 0x8080808080808080ULL;
1506                writeq(val64, &bar0->rts_qos_steering);
1507                break;
1508        case 2:
1509                val64 = 0x0001000100010001ULL;
1510                writeq(val64, &bar0->rx_w_round_robin_0);
1511                writeq(val64, &bar0->rx_w_round_robin_1);
1512                writeq(val64, &bar0->rx_w_round_robin_2);
1513                writeq(val64, &bar0->rx_w_round_robin_3);
1514                val64 = 0x0001000100000000ULL;
1515                writeq(val64, &bar0->rx_w_round_robin_4);
1516
1517                val64 = 0x8080808040404040ULL;
1518                writeq(val64, &bar0->rts_qos_steering);
1519                break;
1520        case 3:
1521                val64 = 0x0001020001020001ULL;
1522                writeq(val64, &bar0->rx_w_round_robin_0);
1523                val64 = 0x0200010200010200ULL;
1524                writeq(val64, &bar0->rx_w_round_robin_1);
1525                val64 = 0x0102000102000102ULL;
1526                writeq(val64, &bar0->rx_w_round_robin_2);
1527                val64 = 0x0001020001020001ULL;
1528                writeq(val64, &bar0->rx_w_round_robin_3);
1529                val64 = 0x0200010200000000ULL;
1530                writeq(val64, &bar0->rx_w_round_robin_4);
1531
1532                val64 = 0x8080804040402020ULL;
1533                writeq(val64, &bar0->rts_qos_steering);
1534                break;
1535        case 4:
1536                val64 = 0x0001020300010203ULL;
1537                writeq(val64, &bar0->rx_w_round_robin_0);
1538                writeq(val64, &bar0->rx_w_round_robin_1);
1539                writeq(val64, &bar0->rx_w_round_robin_2);
1540                writeq(val64, &bar0->rx_w_round_robin_3);
1541                val64 = 0x0001020300000000ULL;
1542                writeq(val64, &bar0->rx_w_round_robin_4);
1543
1544                val64 = 0x8080404020201010ULL;
1545                writeq(val64, &bar0->rts_qos_steering);
1546                break;
1547        case 5:
1548                val64 = 0x0001020304000102ULL;
1549                writeq(val64, &bar0->rx_w_round_robin_0);
1550                val64 = 0x0304000102030400ULL;
1551                writeq(val64, &bar0->rx_w_round_robin_1);
1552                val64 = 0x0102030400010203ULL;
1553                writeq(val64, &bar0->rx_w_round_robin_2);
1554                val64 = 0x0400010203040001ULL;
1555                writeq(val64, &bar0->rx_w_round_robin_3);
1556                val64 = 0x0203040000000000ULL;
1557                writeq(val64, &bar0->rx_w_round_robin_4);
1558
1559                val64 = 0x8080404020201008ULL;
1560                writeq(val64, &bar0->rts_qos_steering);
1561                break;
1562        case 6:
1563                val64 = 0x0001020304050001ULL;
1564                writeq(val64, &bar0->rx_w_round_robin_0);
1565                val64 = 0x0203040500010203ULL;
1566                writeq(val64, &bar0->rx_w_round_robin_1);
1567                val64 = 0x0405000102030405ULL;
1568                writeq(val64, &bar0->rx_w_round_robin_2);
1569                val64 = 0x0001020304050001ULL;
1570                writeq(val64, &bar0->rx_w_round_robin_3);
1571                val64 = 0x0203040500000000ULL;
1572                writeq(val64, &bar0->rx_w_round_robin_4);
1573
1574                val64 = 0x8080404020100804ULL;
1575                writeq(val64, &bar0->rts_qos_steering);
1576                break;
1577        case 7:
1578                val64 = 0x0001020304050600ULL;
1579                writeq(val64, &bar0->rx_w_round_robin_0);
1580                val64 = 0x0102030405060001ULL;
1581                writeq(val64, &bar0->rx_w_round_robin_1);
1582                val64 = 0x0203040506000102ULL;
1583                writeq(val64, &bar0->rx_w_round_robin_2);
1584                val64 = 0x0304050600010203ULL;
1585                writeq(val64, &bar0->rx_w_round_robin_3);
1586                val64 = 0x0405060000000000ULL;
1587                writeq(val64, &bar0->rx_w_round_robin_4);
1588
1589                val64 = 0x8080402010080402ULL;
1590                writeq(val64, &bar0->rts_qos_steering);
1591                break;
1592        case 8:
1593                val64 = 0x0001020304050607ULL;
1594                writeq(val64, &bar0->rx_w_round_robin_0);
1595                writeq(val64, &bar0->rx_w_round_robin_1);
1596                writeq(val64, &bar0->rx_w_round_robin_2);
1597                writeq(val64, &bar0->rx_w_round_robin_3);
1598                val64 = 0x0001020300000000ULL;
1599                writeq(val64, &bar0->rx_w_round_robin_4);
1600
1601                val64 = 0x8040201008040201ULL;
1602                writeq(val64, &bar0->rts_qos_steering);
1603                break;
1604        }
1605
1606        /* UDP Fix */
1607        val64 = 0;
1608        for (i = 0; i < 8; i++)
1609                writeq(val64, &bar0->rts_frm_len_n[i]);
1610
1611        /* Set the default rts frame length for the rings configured */
1612        val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1613        for (i = 0 ; i < config->rx_ring_num ; i++)
1614                writeq(val64, &bar0->rts_frm_len_n[i]);
1615
1616        /* Set the frame length for the configured rings
1617         * desired by the user
1618         */
1619        for (i = 0; i < config->rx_ring_num; i++) {
1620                /* If rts_frm_len[i] == 0 then it is assumed that user not
1621                 * specified frame length steering.
1622                 * If the user provides the frame length then program
1623                 * the rts_frm_len register for those values or else
1624                 * leave it as it is.
1625                 */
1626                if (rts_frm_len[i] != 0) {
1627                        writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1628                               &bar0->rts_frm_len_n[i]);
1629                }
1630        }
1631
1632        /* Disable differentiated services steering logic */
1633        for (i = 0; i < 64; i++) {
1634                if (rts_ds_steer(nic, i, 0) == FAILURE) {
1635                        DBG_PRINT(ERR_DBG,
1636                                  "%s: rts_ds_steer failed on codepoint %d\n",
1637                                  dev->name, i);
1638                        return -ENODEV;
1639                }
1640        }
1641
1642        /* Program statistics memory */
1643        writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1644
1645        if (nic->device_type == XFRAME_II_DEVICE) {
1646                val64 = STAT_BC(0x320);
1647                writeq(val64, &bar0->stat_byte_cnt);
1648        }
1649
1650        /*
1651         * Initializing the sampling rate for the device to calculate the
1652         * bandwidth utilization.
1653         */
1654        val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1655                MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1656        writeq(val64, &bar0->mac_link_util);
1657
1658        /*
1659         * Initializing the Transmit and Receive Traffic Interrupt
1660         * Scheme.
1661         */
1662
1663        /* Initialize TTI */
1664        if (SUCCESS != init_tti(nic, nic->last_link_state, true))
1665                return -ENODEV;
1666
1667        /* RTI Initialization */
1668        if (nic->device_type == XFRAME_II_DEVICE) {
1669                /*
1670                 * Programmed to generate Apprx 500 Intrs per
1671                 * second
1672                 */
1673                int count = (nic->config.bus_speed * 125)/4;
1674                val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1675        } else
1676                val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1677        val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1678                RTI_DATA1_MEM_RX_URNG_B(0x10) |
1679                RTI_DATA1_MEM_RX_URNG_C(0x30) |
1680                RTI_DATA1_MEM_RX_TIMER_AC_EN;
1681
1682        writeq(val64, &bar0->rti_data1_mem);
1683
1684        val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1685                RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1686        if (nic->config.intr_type == MSI_X)
1687                val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1688                          RTI_DATA2_MEM_RX_UFC_D(0x40));
1689        else
1690                val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1691                          RTI_DATA2_MEM_RX_UFC_D(0x80));
1692        writeq(val64, &bar0->rti_data2_mem);
1693
1694        for (i = 0; i < config->rx_ring_num; i++) {
1695                val64 = RTI_CMD_MEM_WE |
1696                        RTI_CMD_MEM_STROBE_NEW_CMD |
1697                        RTI_CMD_MEM_OFFSET(i);
1698                writeq(val64, &bar0->rti_command_mem);
1699
1700                /*
1701                 * Once the operation completes, the Strobe bit of the
1702                 * command register will be reset. We poll for this
1703                 * particular condition. We wait for a maximum of 500ms
1704                 * for the operation to complete, if it's not complete
1705                 * by then we return error.
1706                 */
1707                time = 0;
1708                while (true) {
1709                        val64 = readq(&bar0->rti_command_mem);
1710                        if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1711                                break;
1712
1713                        if (time > 10) {
1714                                DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1715                                          dev->name);
1716                                return -ENODEV;
1717                        }
1718                        time++;
1719                        msleep(50);
1720                }
1721        }
1722
1723        /*
1724         * Initializing proper values as Pause threshold into all
1725         * the 8 Queues on Rx side.
1726         */
1727        writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1728        writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1729
1730        /* Disable RMAC PAD STRIPPING */
1731        add = &bar0->mac_cfg;
1732        val64 = readq(&bar0->mac_cfg);
1733        val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1734        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1735        writel((u32) (val64), add);
1736        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1737        writel((u32) (val64 >> 32), (add + 4));
1738        val64 = readq(&bar0->mac_cfg);
1739
1740        /* Enable FCS stripping by adapter */
1741        add = &bar0->mac_cfg;
1742        val64 = readq(&bar0->mac_cfg);
1743        val64 |= MAC_CFG_RMAC_STRIP_FCS;
1744        if (nic->device_type == XFRAME_II_DEVICE)
1745                writeq(val64, &bar0->mac_cfg);
1746        else {
1747                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1748                writel((u32) (val64), add);
1749                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1750                writel((u32) (val64 >> 32), (add + 4));
1751        }
1752
1753        /*
1754         * Set the time value to be inserted in the pause frame
1755         * generated by xena.
1756         */
1757        val64 = readq(&bar0->rmac_pause_cfg);
1758        val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1759        val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1760        writeq(val64, &bar0->rmac_pause_cfg);
1761
1762        /*
1763         * Set the Threshold Limit for Generating the pause frame
1764         * If the amount of data in any Queue exceeds ratio of
1765         * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1766         * pause frame is generated
1767         */
1768        val64 = 0;
1769        for (i = 0; i < 4; i++) {
1770                val64 |= (((u64)0xFF00 |
1771                           nic->mac_control.mc_pause_threshold_q0q3)
1772                          << (i * 2 * 8));
1773        }
1774        writeq(val64, &bar0->mc_pause_thresh_q0q3);
1775
1776        val64 = 0;
1777        for (i = 0; i < 4; i++) {
1778                val64 |= (((u64)0xFF00 |
1779                           nic->mac_control.mc_pause_threshold_q4q7)
1780                          << (i * 2 * 8));
1781        }
1782        writeq(val64, &bar0->mc_pause_thresh_q4q7);
1783
1784        /*
1785         * TxDMA will stop Read request if the number of read split has
1786         * exceeded the limit pointed by shared_splits
1787         */
1788        val64 = readq(&bar0->pic_control);
1789        val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1790        writeq(val64, &bar0->pic_control);
1791
1792        if (nic->config.bus_speed == 266) {
1793                writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1794                writeq(0x0, &bar0->read_retry_delay);
1795                writeq(0x0, &bar0->write_retry_delay);
1796        }
1797
1798        /*
1799         * Programming the Herc to split every write transaction
1800         * that does not start on an ADB to reduce disconnects.
1801         */
1802        if (nic->device_type == XFRAME_II_DEVICE) {
1803                val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1804                        MISC_LINK_STABILITY_PRD(3);
1805                writeq(val64, &bar0->misc_control);
1806                val64 = readq(&bar0->pic_control2);
1807                val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1808                writeq(val64, &bar0->pic_control2);
1809        }
1810        if (strstr(nic->product_name, "CX4")) {
1811                val64 = TMAC_AVG_IPG(0x17);
1812                writeq(val64, &bar0->tmac_avg_ipg);
1813        }
1814
1815        return SUCCESS;
1816}
/* Link-fault indication schemes returned by s2io_link_fault_indication():
 * either the adapter raises a GPIO interrupt on link up/down, or link
 * state is polled via the RMAC error timer.
 */
#define LINK_UP_DOWN_INTERRUPT          1
#define MAC_RMAC_ERR_TIMER              2
1819
1820static int s2io_link_fault_indication(struct s2io_nic *nic)
1821{
1822        if (nic->device_type == XFRAME_II_DEVICE)
1823                return LINK_UP_DOWN_INTERRUPT;
1824        else
1825                return MAC_RMAC_ERR_TIMER;
1826}
1827
1828/**
1829 *  do_s2io_write_bits -  update alarm bits in alarm register
1830 *  @value: alarm bits
1831 *  @flag: interrupt status
1832 *  @addr: address value
1833 *  Description: update alarm bits in alarm register
1834 *  Return Value:
1835 *  NONE.
1836 */
1837static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1838{
1839        u64 temp64;
1840
1841        temp64 = readq(addr);
1842
1843        if (flag == ENABLE_INTRS)
1844                temp64 &= ~((u64)value);
1845        else
1846                temp64 |= ((u64)value);
1847        writeq(temp64, addr);
1848}
1849
/**
 *  en_dis_err_alarms - mask/unmask the error alarm interrupt sources
 *  @nic: device private variable
 *  @mask: bitmask of alarm blocks (TX_DMA_INTR, TX_MAC_INTR, TX_XGXS_INTR,
 *         RX_DMA_INTR, RX_MAC_INTR, RX_XGXS_INTR, MC_INTR) to act upon
 *  @flag: ENABLE_INTRS to unmask the selected alarms, otherwise they are
 *         masked
 *  Description: For each block selected in @mask, programs that block's
 *  alarm mask registers via do_s2io_write_bits() and accumulates the
 *  matching top-level bits into a mask that is stored in
 *  nic->general_int_mask.  All interrupts are globally masked on entry.
 *  Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 gen_int_mask = 0;
        u64 interruptible;

        /* Globally mask everything while the per-block masks are being
         * reprogrammed.
         */
        writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
        /* Tx DMA subsystem alarms (PFC, TDA, PCC, TTI, LSO, TPA, SM) */
        if (mask & TX_DMA_INTR) {
                gen_int_mask |= TXDMA_INT_M;

                do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
                                   TXDMA_PCC_INT | TXDMA_TTI_INT |
                                   TXDMA_LSO_INT | TXDMA_TPA_INT |
                                   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

                do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
                                   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
                                   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
                                   &bar0->pfc_err_mask);

                do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
                                   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
                                   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

                do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
                                   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
                                   PCC_N_SERR | PCC_6_COF_OV_ERR |
                                   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
                                   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
                                   PCC_TXB_ECC_SG_ERR,
                                   flag, &bar0->pcc_err_mask);

                do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
                                   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

                do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
                                   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
                                   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
                                   flag, &bar0->lso_err_mask);

                do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
                                   flag, &bar0->tpa_err_mask);

                do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
        }

        /* Tx MAC (TMAC) alarms */
        if (mask & TX_MAC_INTR) {
                gen_int_mask |= TXMAC_INT_M;
                do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
                                   &bar0->mac_int_mask);
                do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
                                   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
                                   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
                                   flag, &bar0->mac_tmac_err_mask);
        }

        /* Tx XGXS (transmit serdes) alarms */
        if (mask & TX_XGXS_INTR) {
                gen_int_mask |= TXXGXS_INT_M;
                do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
                                   &bar0->xgxs_int_mask);
                do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
                                   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
                                   flag, &bar0->xgxs_txgxs_err_mask);
        }

        /* Rx DMA subsystem alarms (RC, PRC, RPA, RDA, RTI) */
        if (mask & RX_DMA_INTR) {
                gen_int_mask |= RXDMA_INT_M;
                do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
                                   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
                                   flag, &bar0->rxdma_int_mask);
                do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
                                   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
                                   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
                                   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
                do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
                                   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
                                   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
                                   &bar0->prc_pcix_err_mask);
                do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
                                   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
                                   &bar0->rpa_err_mask);
                do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
                                   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
                                   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
                                   RDA_FRM_ECC_SG_ERR |
                                   RDA_MISC_ERR|RDA_PCIX_ERR,
                                   flag, &bar0->rda_err_mask);
                do_s2io_write_bits(RTI_SM_ERR_ALARM |
                                   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
                                   flag, &bar0->rti_err_mask);
        }

        /* Rx MAC (RMAC) alarms */
        if (mask & RX_MAC_INTR) {
                gen_int_mask |= RXMAC_INT_M;
                do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
                                   &bar0->mac_int_mask);
                interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
                                 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
                                 RMAC_DOUBLE_ECC_ERR);
                /* Link state change rides on the RMAC error alarm only
                 * when the timer-based link-fault scheme is in use.
                 */
                if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
                        interruptible |= RMAC_LINK_STATE_CHANGE_INT;
                do_s2io_write_bits(interruptible,
                                   flag, &bar0->mac_rmac_err_mask);
        }

        /* Rx XGXS (receive serdes) alarms */
        if (mask & RX_XGXS_INTR) {
                gen_int_mask |= RXXGXS_INT_M;
                do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
                                   &bar0->xgxs_int_mask);
                do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
                                   &bar0->xgxs_rxgxs_err_mask);
        }

        /* Memory controller (RLDRAM ECC/PLL) alarms */
        if (mask & MC_INTR) {
                gen_int_mask |= MC_INT_M;
                do_s2io_write_bits(MC_INT_MASK_MC_INT,
                                   flag, &bar0->mc_int_mask);
                do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
                                   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
                                   &bar0->mc_err_mask);
        }
        nic->general_int_mask = gen_int_mask;

        /* Remove this line when alarm interrupts are enabled */
        nic->general_int_mask = 0;
}
1976
1977/**
1978 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1979 *  @nic: device private variable,
1980 *  @mask: A mask indicating which Intr block must be modified and,
1981 *  @flag: A flag indicating whether to enable or disable the Intrs.
1982 *  Description: This function will either disable or enable the interrupts
1983 *  depending on the flag argument. The mask argument can be used to
1984 *  enable/disable any Intr block.
1985 *  Return Value: NONE.
1986 */
1987
1988static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1989{
1990        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1991        register u64 temp64 = 0, intr_mask = 0;
1992
1993        intr_mask = nic->general_int_mask;
1994
1995        /*  Top level interrupt classification */
1996        /*  PIC Interrupts */
1997        if (mask & TX_PIC_INTR) {
1998                /*  Enable PIC Intrs in the general intr mask register */
1999                intr_mask |= TXPIC_INT_M;
2000                if (flag == ENABLE_INTRS) {
2001                        /*
2002                         * If Hercules adapter enable GPIO otherwise
2003                         * disable all PCIX, Flash, MDIO, IIC and GPIO
2004                         * interrupts for now.
2005                         * TODO
2006                         */
2007                        if (s2io_link_fault_indication(nic) ==
2008                            LINK_UP_DOWN_INTERRUPT) {
2009                                do_s2io_write_bits(PIC_INT_GPIO, flag,
2010                                                   &bar0->pic_int_mask);
2011                                do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2012                                                   &bar0->gpio_int_mask);
2013                        } else
2014                                writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2015                } else if (flag == DISABLE_INTRS) {
2016                        /*
2017                         * Disable PIC Intrs in the general
2018                         * intr mask register
2019                         */
2020                        writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2021                }
2022        }
2023
2024        /*  Tx traffic interrupts */
2025        if (mask & TX_TRAFFIC_INTR) {
2026                intr_mask |= TXTRAFFIC_INT_M;
2027                if (flag == ENABLE_INTRS) {
2028                        /*
2029                         * Enable all the Tx side interrupts
2030                         * writing 0 Enables all 64 TX interrupt levels
2031                         */
2032                        writeq(0x0, &bar0->tx_traffic_mask);
2033                } else if (flag == DISABLE_INTRS) {
2034                        /*
2035                         * Disable Tx Traffic Intrs in the general intr mask
2036                         * register.
2037                         */
2038                        writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2039                }
2040        }
2041
2042        /*  Rx traffic interrupts */
2043        if (mask & RX_TRAFFIC_INTR) {
2044                intr_mask |= RXTRAFFIC_INT_M;
2045                if (flag == ENABLE_INTRS) {
2046                        /* writing 0 Enables all 8 RX interrupt levels */
2047                        writeq(0x0, &bar0->rx_traffic_mask);
2048                } else if (flag == DISABLE_INTRS) {
2049                        /*
2050                         * Disable Rx Traffic Intrs in the general intr mask
2051                         * register.
2052                         */
2053                        writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2054                }
2055        }
2056
2057        temp64 = readq(&bar0->general_int_mask);
2058        if (flag == ENABLE_INTRS)
2059                temp64 &= ~((u64)intr_mask);
2060        else
2061                temp64 = DISABLE_ALL_INTRS;
2062        writeq(temp64, &bar0->general_int_mask);
2063
2064        nic->general_int_mask = readq(&bar0->general_int_mask);
2065}
2066
2067/**
2068 *  verify_pcc_quiescent- Checks for PCC quiescent state
2069 *  @sp : private member of the device structure, which is a pointer to the
2070 *  s2io_nic structure.
2071 *  @flag: boolean controlling function path
2072 *  Return: 1 If PCC is quiescence
2073 *          0 If PCC is not quiescence
2074 */
2075static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2076{
2077        int ret = 0, herc;
2078        struct XENA_dev_config __iomem *bar0 = sp->bar0;
2079        u64 val64 = readq(&bar0->adapter_status);
2080
2081        herc = (sp->device_type == XFRAME_II_DEVICE);
2082
2083        if (flag == false) {
2084                if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2085                        if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2086                                ret = 1;
2087                } else {
2088                        if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2089                                ret = 1;
2090                }
2091        } else {
2092                if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2093                        if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2094                             ADAPTER_STATUS_RMAC_PCC_IDLE))
2095                                ret = 1;
2096                } else {
2097                        if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2098                             ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2099                                ret = 1;
2100                }
2101        }
2102
2103        return ret;
2104}
2105/**
2106 *  verify_xena_quiescence - Checks whether the H/W is ready
2107 *  @sp : private member of the device structure, which is a pointer to the
2108 *  s2io_nic structure.
2109 *  Description: Returns whether the H/W is ready to go or not. Depending
2110 *  on whether adapter enable bit was written or not the comparison
2111 *  differs and the calling function passes the input argument flag to
2112 *  indicate this.
2113 *  Return: 1 If xena is quiescence
2114 *          0 If Xena is not quiescence
2115 */
2116
2117static int verify_xena_quiescence(struct s2io_nic *sp)
2118{
2119        int  mode;
2120        struct XENA_dev_config __iomem *bar0 = sp->bar0;
2121        u64 val64 = readq(&bar0->adapter_status);
2122        mode = s2io_verify_pci_mode(sp);
2123
2124        if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2125                DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2126                return 0;
2127        }
2128        if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2129                DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2130                return 0;
2131        }
2132        if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2133                DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2134                return 0;
2135        }
2136        if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2137                DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2138                return 0;
2139        }
2140        if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2141                DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2142                return 0;
2143        }
2144        if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2145                DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2146                return 0;
2147        }
2148        if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2149                DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2150                return 0;
2151        }
2152        if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2153                DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2154                return 0;
2155        }
2156
2157        /*
2158         * In PCI 33 mode, the P_PLL is not used, and therefore,
2159         * the the P_PLL_LOCK bit in the adapter_status register will
2160         * not be asserted.
2161         */
2162        if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2163            sp->device_type == XFRAME_II_DEVICE &&
2164            mode != PCI_MODE_PCI_33) {
2165                DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2166                return 0;
2167        }
2168        if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2169              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2170                DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2171                return 0;
2172        }
2173        return 1;
2174}
2175
2176/**
2177 * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2178 * @sp: Pointer to device specifc structure
2179 * Description :
2180 * New procedure to clear mac address reading  problems on Alpha platforms
2181 *
2182 */
2183
2184static void fix_mac_address(struct s2io_nic *sp)
2185{
2186        struct XENA_dev_config __iomem *bar0 = sp->bar0;
2187        int i = 0;
2188
2189        while (fix_mac[i] != END_SIGN) {
2190                writeq(fix_mac[i++], &bar0->gpio_control);
2191                udelay(10);
2192                (void) readq(&bar0->gpio_control);
2193        }
2194}
2195
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this  function is
 *  called,all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and FAILURE (-1) if the adapter is not quiescent.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the PRC at the first Rx block of this ring. */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace the backoff interval field with 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Module parameter: optionally keep VLAN tags in received frames. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * NOTE(review): this clears ADAPTER_ECC_EN, which reads as
	 * *disabling* ECC despite the original "Enabling ECC Protection"
	 * wording — confirm the bit polarity against the Xframe spec.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED setup value written at raw BAR0 offset 0x2700;
		 * presumably an undocumented register — see SXE-002. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2313/**
2314 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2315 * @fifo_data: fifo data pointer
2316 * @txdlp: descriptor
2317 * @get_off: unused
2318 */
2319static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2320                                        struct TxD *txdlp, int get_off)
2321{
2322        struct s2io_nic *nic = fifo_data->nic;
2323        struct sk_buff *skb;
2324        struct TxD *txds;
2325        u16 j, frg_cnt;
2326
2327        txds = txdlp;
2328        if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2329                dma_unmap_single(&nic->pdev->dev,
2330                                 (dma_addr_t)txds->Buffer_Pointer,
2331                                 sizeof(u64), DMA_TO_DEVICE);
2332                txds++;
2333        }
2334
2335        skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2336        if (!skb) {
2337                memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2338                return NULL;
2339        }
2340        dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
2341                         skb_headlen(skb), DMA_TO_DEVICE);
2342        frg_cnt = skb_shinfo(skb)->nr_frags;
2343        if (frg_cnt) {
2344                txds++;
2345                for (j = 0; j < frg_cnt; j++, txds++) {
2346                        const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2347                        if (!txds->Buffer_Pointer)
2348                                break;
2349                        dma_unmap_page(&nic->pdev->dev,
2350                                       (dma_addr_t)txds->Buffer_Pointer,
2351                                       skb_frag_size(frag), DMA_TO_DEVICE);
2352                }
2353        }
2354        memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2355        return skb;
2356}
2357
2358/**
2359 *  free_tx_buffers - Free all queued Tx buffers
2360 *  @nic : device private variable.
2361 *  Description:
2362 *  Free all queued Tx buffers.
2363 *  Return Value: void
2364 */
2365
2366static void free_tx_buffers(struct s2io_nic *nic)
2367{
2368        struct net_device *dev = nic->dev;
2369        struct sk_buff *skb;
2370        struct TxD *txdp;
2371        int i, j;
2372        int cnt = 0;
2373        struct config_param *config = &nic->config;
2374        struct mac_info *mac_control = &nic->mac_control;
2375        struct stat_block *stats = mac_control->stats_info;
2376        struct swStat *swstats = &stats->sw_stat;
2377
2378        for (i = 0; i < config->tx_fifo_num; i++) {
2379                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2380                struct fifo_info *fifo = &mac_control->fifos[i];
2381                unsigned long flags;
2382
2383                spin_lock_irqsave(&fifo->tx_lock, flags);
2384                for (j = 0; j < tx_cfg->fifo_len; j++) {
2385                        txdp = fifo->list_info[j].list_virt_addr;
2386                        skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2387                        if (skb) {
2388                                swstats->mem_freed += skb->truesize;
2389                                dev_kfree_skb(skb);
2390                                cnt++;
2391                        }
2392                }
2393                DBG_PRINT(INTR_DBG,
2394                          "%s: forcibly freeing %d skbs on FIFO%d\n",
2395                          dev->name, cnt, i);
2396                fifo->tx_curr_get_info.offset = 0;
2397                fifo->tx_curr_put_info.offset = 0;
2398                spin_unlock_irqrestore(&fifo->tx_lock, flags);
2399        }
2400}
2401
2402/**
2403 *   stop_nic -  To stop the nic
2404 *   @nic : device private variable.
2405 *   Description:
2406 *   This function does exactly the opposite of what the start_nic()
2407 *   function does. This function is called to stop the device.
2408 *   Return Value:
2409 *   void.
2410 */
2411
2412static void stop_nic(struct s2io_nic *nic)
2413{
2414        struct XENA_dev_config __iomem *bar0 = nic->bar0;
2415        register u64 val64 = 0;
2416        u16 interruptible;
2417
2418        /*  Disable all interrupts */
2419        en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2420        interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2421        interruptible |= TX_PIC_INTR;
2422        en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2423
2424        /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2425        val64 = readq(&bar0->adapter_control);
2426        val64 &= ~(ADAPTER_CNTL_EN);
2427        writeq(val64, &bar0->adapter_control);
2428}
2429
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic : device private variable.
 *  @ring: per ring structure
 *  @from_card_up: If this is true, we will map the buffer to get
 *     the dma address for buf0 and buf1 to give it to the card.
 *     Else we will sync the already mapped buffer to give it to the card.
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	/* Ownership of descriptors is handed to the adapter in batches:
	 * first_rxdp remembers the batch head so RXD_OWN_XENA is set on
	 * it last, after a dma_wmb(), once the rest of the batch is ready. */
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of empty descriptor slots to refill. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* Put pointer caught up with get pointer on a still-owned
		 * descriptor: the ring is effectively full, stop here. */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* Wrap the put pointer to the next block when the current
		 * block's descriptors are exhausted. */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the adapter (and, in 3B mode,
		 * already carries a buffer — s2BIT(0) in Control_2): done. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = netdev_alloc_skb(nic->dev, size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Hand the current batch to the adapter before
			 * bailing out, so already-filled slots are usable. */
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				dma_map_single(&ring->pdev->dev, skb->data,
					       size - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE+1
			 * boundary for the 128-byte-aligned buffer. */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0 (header buffer): map fresh at card-up,
			 * otherwise just sync the existing mapping. */
			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					dma_map_single(&ring->pdev->dev,
						       ba->ba_0, BUF0_LEN,
						       DMA_FROM_DEVICE);
				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				dma_sync_single_for_device(&ring->pdev->dev,
							   (dma_addr_t)rxdp3->Buffer0_ptr,
							   BUF0_LEN,
							   DMA_FROM_DEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
								    skb->data,
								    ring->mtu + 4,
								    DMA_FROM_DEVICE);

				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						dma_map_single(&ring->pdev->dev,
							       ba->ba_1,
							       BUF1_LEN,
							       DMA_FROM_DEVICE);

					if (dma_mapping_error(&nic->pdev->dev,
							      rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping
						 * before the common cleanup. */
						dma_unmap_single(&ring->pdev->dev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 DMA_FROM_DEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Non-batch-head descriptors get ownership immediately;
		 * batch heads are deferred until the barrier below. */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			/* New batch head: release the previous batch first. */
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		dma_wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2659
2660static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2661{
2662        struct net_device *dev = sp->dev;
2663        int j;
2664        struct sk_buff *skb;
2665        struct RxD_t *rxdp;
2666        struct RxD1 *rxdp1;
2667        struct RxD3 *rxdp3;
2668        struct mac_info *mac_control = &sp->mac_control;
2669        struct stat_block *stats = mac_control->stats_info;
2670        struct swStat *swstats = &stats->sw_stat;
2671
2672        for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2673                rxdp = mac_control->rings[ring_no].
2674                        rx_blocks[blk].rxds[j].virt_addr;
2675                skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2676                if (!skb)
2677                        continue;
2678                if (sp->rxd_mode == RXD_MODE_1) {
2679                        rxdp1 = (struct RxD1 *)rxdp;
2680                        dma_unmap_single(&sp->pdev->dev,
2681                                         (dma_addr_t)rxdp1->Buffer0_ptr,
2682                                         dev->mtu +
2683                                         HEADER_ETHERNET_II_802_3_SIZE +
2684                                         HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2685                                         DMA_FROM_DEVICE);
2686                        memset(rxdp, 0, sizeof(struct RxD1));
2687                } else if (sp->rxd_mode == RXD_MODE_3B) {
2688                        rxdp3 = (struct RxD3 *)rxdp;
2689                        dma_unmap_single(&sp->pdev->dev,
2690                                         (dma_addr_t)rxdp3->Buffer0_ptr,
2691                                         BUF0_LEN, DMA_FROM_DEVICE);
2692                        dma_unmap_single(&sp->pdev->dev,
2693                                         (dma_addr_t)rxdp3->Buffer1_ptr,
2694                                         BUF1_LEN, DMA_FROM_DEVICE);
2695                        dma_unmap_single(&sp->pdev->dev,
2696                                         (dma_addr_t)rxdp3->Buffer2_ptr,
2697                                         dev->mtu + 4, DMA_FROM_DEVICE);
2698                        memset(rxdp, 0, sizeof(struct RxD3));
2699                }
2700                swstats->mem_freed += skb->truesize;
2701                dev_kfree_skb(skb);
2702                mac_control->rings[ring_no].rx_bufs_left -= 1;
2703        }
2704}
2705
2706/**
2707 *  free_rx_buffers - Frees all Rx buffers
2708 *  @sp: device private variable.
2709 *  Description:
2710 *  This function will free all Rx buffers allocated by host.
2711 *  Return Value:
2712 *  NONE.
2713 */
2714
2715static void free_rx_buffers(struct s2io_nic *sp)
2716{
2717        struct net_device *dev = sp->dev;
2718        int i, blk = 0, buf_cnt = 0;
2719        struct config_param *config = &sp->config;
2720        struct mac_info *mac_control = &sp->mac_control;
2721
2722        for (i = 0; i < config->rx_ring_num; i++) {
2723                struct ring_info *ring = &mac_control->rings[i];
2724
2725                for (blk = 0; blk < rx_ring_sz[i]; blk++)
2726                        free_rxd_blk(sp, i, blk);
2727
2728                ring->rx_curr_put_info.block_index = 0;
2729                ring->rx_curr_get_info.block_index = 0;
2730                ring->rx_curr_put_info.offset = 0;
2731                ring->rx_curr_get_info.offset = 0;
2732                ring->rx_bufs_left = 0;
2733                DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2734                          dev->name, buf_cnt, i);
2735        }
2736}
2737
2738static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2739{
2740        if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2741                DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2742                          ring->dev->name);
2743        }
2744        return 0;
2745}
2746
/**
 * s2io_poll_msix - Rx interrupt handler for NAPI support
 * @napi : pointer to the napi structure.
 * @budget : The number of packets that were budgeted to be processed
 * during  one pass through the 'Poll" function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in a interrupt context
 * also It will process only a given number of packets.
 * Return value:
 * Number of packets processed (completes NAPI and re-enables the ring's
 * MSI-X vector when fewer than @budget were processed).
 */

static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	/* Nothing to do if the card has been taken down. */
	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	/* Refill the ring with fresh Rx buffers. */
	s2io_chk_rx_buffers(nic, ring);

	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);
		/*Re Enable MSI-Rx Vector*/
		/* NOTE(review): the per-ring mask byte sits at offset
		 * (7 - ring_no) within xmsi_mask_reg, and 0x3f/0xbf are the
		 * unmask patterns for ring 0 vs others — presumably per the
		 * Xframe MSI-X register layout; confirm against the spec.
		 * The readb flushes the posted write. */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		val8 = readb(addr);
	}
	return pkts_processed;
}
2788
2789static int s2io_poll_inta(struct napi_struct *napi, int budget)
2790{
2791        struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2792        int pkts_processed = 0;
2793        int ring_pkts_processed, i;
2794        struct XENA_dev_config __iomem *bar0 = nic->bar0;
2795        int budget_org = budget;
2796        struct config_param *config = &nic->config;
2797        struct mac_info *mac_control = &nic->mac_control;
2798
2799        if (unlikely(!is_s2io_card_up(nic)))
2800                return 0;
2801
2802        for (i = 0; i < config->rx_ring_num; i++) {
2803                struct ring_info *ring = &mac_control->rings[i];
2804                ring_pkts_processed = rx_intr_handler(ring, budget);
2805                s2io_chk_rx_buffers(nic, ring);
2806                pkts_processed += ring_pkts_processed;
2807                budget -= ring_pkts_processed;
2808                if (budget <= 0)
2809                        break;
2810        }
2811        if (pkts_processed < budget_org) {
2812                napi_complete_done(napi, pkts_processed);
2813                /* Re enable the Rx interrupts for the ring */
2814                writeq(0, &bar0->rx_traffic_mask);
2815                readl(&bar0->rx_traffic_mask);
2816        }
2817        return pkts_processed;
2818}
2819
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 *      This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	const int irq = nic->pdev->irq;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	/* All-ones pattern: acknowledge every pending traffic interrupt. */
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Device is inaccessible (e.g. mid PCI error recovery); bail out. */
	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(irq);

	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* budget 0: netpoll context, process without NAPI limits */
		rx_intr_handler(ring, 0);
	}

	/* Replenish the Rx rings drained above. */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(irq);
}
#endif
2875
2876/**
2877 *  rx_intr_handler - Rx interrupt handler
2878 *  @ring_data: per ring structure.
2879 *  @budget: budget for napi processing.
2880 *  Description:
2881 *  If the interrupt is because of a received frame or if the
2882 *  receive ring contains fresh as yet un-processed frames,this function is
2883 *  called. It picks out the RxD at which place the last Rx processing had
2884 *  stopped and sends the skb to the OSM's Rx handler and then increments
2885 *  the offset.
2886 *  Return Value:
2887 *  No. of napi packets processed.
2888 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	/* Nothing to do when NAPI hands us an exhausted budget. */
	if (budget <= 0)
		return napi_pkts;

	/*
	 * Snapshot the driver's get (consumer) and put (producer) positions.
	 * get_info is advanced locally and written back to the ring as we go.
	 */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Walk descriptors the NIC has marked as carrying a completed frame. */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If we are right behind the put index, the ring is full;
		 * stop so the producer side can refill.
		 * NOTE(review): this check only compares offsets within the
		 * same block — confirm wrap-around at a block boundary is
		 * handled by the producer side.
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		/* Host_Control stashes the skb pointer for this descriptor. */
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
				  ring_data->dev->name);
			return 0;
		}
		/* Unmap the DMA buffer(s) according to the receive mode. */
		if (ring_data->rxd_mode == RXD_MODE_1) {
			/* Single-buffer mode: one buffer holds the whole frame. */
			rxdp1 = (struct RxD1 *)rxdp;
			dma_unmap_single(&ring_data->pdev->dev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 DMA_FROM_DEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* 3-buffer mode: sync the header buffer, unmap payload. */
			rxdp3 = (struct RxD3 *)rxdp;
			dma_sync_single_for_cpu(&ring_data->pdev->dev,
						(dma_addr_t)rxdp3->Buffer0_ptr,
						BUF0_LEN, DMA_FROM_DEVICE);
			dma_unmap_single(&ring_data->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4, DMA_FROM_DEVICE);
		}
		prefetch(skb->data);
		/* Hand the frame up the stack (or into an LRO session). */
		rx_osm_handler(ring_data, rxdp);
		/* Advance the get pointer, wrapping into the next block. */
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* Under NAPI, respect the polling budget. */
		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
2983
2984/**
2985 *  tx_intr_handler - Transmit interrupt handler
2986 *  @fifo_data : fifo data pointer
2987 *  Description:
2988 *  If an interrupt was raised to indicate DMA complete of the
2989 *  Tx packet, this function is called. It identifies the last TxD
2990 *  whose buffer was freed and frees all skbs whose data have already
2991 *  DMA'ed into the NICs internal memory.
2992 *  Return Value:
2993 *  NONE
2994 */
2995
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/*
	 * Best-effort: if another context holds the Tx lock, skip this run;
	 * the next interrupt/poll will reap the completed descriptors.
	 */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
	/*
	 * Reap TxDs the NIC has released: ownership returned to the host,
	 * get has not caught up with put, and an skb is attached.
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				swstats->parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch (err_mask) {
			case 2:
				swstats->tx_buf_abort_cnt++;
				break;

			case 3:
				swstats->tx_desc_abort_cnt++;
				break;

			case 7:
				swstats->tx_parity_err_cnt++;
				break;

			case 10:
				swstats->tx_link_loss_cnt++;
				break;

			case 15:
				swstats->tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Unmap the buffers and retrieve the skb for this TxD list. */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
				  __func__);
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		swstats->mem_freed += skb->truesize;
		dev_consume_skb_irq(skb);

		/* Advance the get pointer, wrapping at the end of the FIFO. */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset = get_info.offset;
	}

	/* Restart the queue if we freed enough descriptors. */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3074
3075/**
 *  s2io_mdio_write - Function to write into MDIO registers
3077 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3078 *  @addr     : address value
3079 *  @value    : data value
3080 *  @dev      : pointer to net_device structure
3081 *  Description:
3082 *  This function is used to write values to the MDIO registers
3083 *  NONE
3084 */
3085static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3086                            struct net_device *dev)
3087{
3088        u64 val64;
3089        struct s2io_nic *sp = netdev_priv(dev);
3090        struct XENA_dev_config __iomem *bar0 = sp->bar0;
3091
3092        /* address transaction */
3093        val64 = MDIO_MMD_INDX_ADDR(addr) |
3094                MDIO_MMD_DEV_ADDR(mmd_type) |
3095                MDIO_MMS_PRT_ADDR(0x0);
3096        writeq(val64, &bar0->mdio_control);
3097        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3098        writeq(val64, &bar0->mdio_control);
3099        udelay(100);
3100
3101        /* Data transaction */
3102        val64 = MDIO_MMD_INDX_ADDR(addr) |
3103                MDIO_MMD_DEV_ADDR(mmd_type) |
3104                MDIO_MMS_PRT_ADDR(0x0) |
3105                MDIO_MDIO_DATA(value) |
3106                MDIO_OP(MDIO_OP_WRITE_TRANS);
3107        writeq(val64, &bar0->mdio_control);
3108        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3109        writeq(val64, &bar0->mdio_control);
3110        udelay(100);
3111
3112        val64 = MDIO_MMD_INDX_ADDR(addr) |
3113                MDIO_MMD_DEV_ADDR(mmd_type) |
3114                MDIO_MMS_PRT_ADDR(0x0) |
3115                MDIO_OP(MDIO_OP_READ_TRANS);
3116        writeq(val64, &bar0->mdio_control);
3117        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3118        writeq(val64, &bar0->mdio_control);
3119        udelay(100);
3120}
3121
3122/**
 *  s2io_mdio_read - Function to read from MDIO registers
3124 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3125 *  @addr     : address value
3126 *  @dev      : pointer to net_device structure
3127 *  Description:
 *  This function is used to read values from the MDIO registers
3129 *  NONE
3130 */
3131static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3132{
3133        u64 val64 = 0x0;
3134        u64 rval64 = 0x0;
3135        struct s2io_nic *sp = netdev_priv(dev);
3136        struct XENA_dev_config __iomem *bar0 = sp->bar0;
3137
3138        /* address transaction */
3139        val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3140                         | MDIO_MMD_DEV_ADDR(mmd_type)
3141                         | MDIO_MMS_PRT_ADDR(0x0));
3142        writeq(val64, &bar0->mdio_control);
3143        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3144        writeq(val64, &bar0->mdio_control);
3145        udelay(100);
3146
3147        /* Data transaction */
3148        val64 = MDIO_MMD_INDX_ADDR(addr) |
3149                MDIO_MMD_DEV_ADDR(mmd_type) |
3150                MDIO_MMS_PRT_ADDR(0x0) |
3151                MDIO_OP(MDIO_OP_READ_TRANS);
3152        writeq(val64, &bar0->mdio_control);
3153        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3154        writeq(val64, &bar0->mdio_control);
3155        udelay(100);
3156
3157        /* Read the value from regs */
3158        rval64 = readq(&bar0->mdio_control);
3159        rval64 = rval64 & 0xFFFF0000;
3160        rval64 = rval64 >> 16;
3161        return rval64;
3162}
3163
3164/**
3165 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3166 *  @counter      : counter value to be updated
3167 *  @regs_stat    : registers status
3168 *  @index        : index
3169 *  @flag         : flag to indicate the status
3170 *  @type         : counter type
3171 *  Description:
3172 *  This function is to check the status of the xpak counters value
3173 *  NONE
3174 */
3175
3176static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3177                                  u16 flag, u16 type)
3178{
3179        u64 mask = 0x3;
3180        u64 val64;
3181        int i;
3182        for (i = 0; i < index; i++)
3183                mask = mask << 0x2;
3184
3185        if (flag > 0) {
3186                *counter = *counter + 1;
3187                val64 = *regs_stat & mask;
3188                val64 = val64 >> (index * 0x2);
3189                val64 = val64 + 1;
3190                if (val64 == 3) {
3191                        switch (type) {
3192                        case 1:
3193                                DBG_PRINT(ERR_DBG,
3194                                          "Take Xframe NIC out of service.\n");
3195                                DBG_PRINT(ERR_DBG,
3196"Excessive temperatures may result in premature transceiver failure.\n");
3197                                break;
3198                        case 2:
3199                                DBG_PRINT(ERR_DBG,
3200                                          "Take Xframe NIC out of service.\n");
3201                                DBG_PRINT(ERR_DBG,
3202"Excessive bias currents may indicate imminent laser diode failure.\n");
3203                                break;
3204                        case 3:
3205                                DBG_PRINT(ERR_DBG,
3206                                          "Take Xframe NIC out of service.\n");
3207                                DBG_PRINT(ERR_DBG,
3208"Excessive laser output power may saturate far-end receiver.\n");
3209                                break;
3210                        default:
3211                                DBG_PRINT(ERR_DBG,
3212                                          "Incorrect XPAK Alarm type\n");
3213                        }
3214                        val64 = 0x0;
3215                }
3216                val64 = val64 << (index * 0x2);
3217                *regs_stat = (*regs_stat & (~mask)) | (val64);
3218
3219        } else {
3220                *regs_stat = *regs_stat & (~mask);
3221        }
3222}
3223
3224/**
3225 *  s2io_updt_xpak_counter - Function to update the xpak counters
3226 *  @dev         : pointer to net_device struct
3227 *  Description:
 *  This function is to update the status of the xpak counters value
3229 *  NONE
3230 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Check the communication with the MDIO slave */
	addr = MDIO_CTRL1;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
	/* All-ones or all-zeroes means the MDIO slave did not respond. */
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG,
			  "ERR: MDIO slave access failed - Returned %llx\n",
			  (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of control reg 1 */
	if (val64 != MDIO_CTRL1_SPEED10G) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
			  "Returned: %llx- Expected: 0x%x\n",
			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
		return;
	}

	/* Loading the DOM register to MDIO register */
	/* NOTE(review): 0xA100 appears to be a vendor-specific XPAK DOM
	 * latch register — the write triggers the load, the read-back value
	 * is discarded. */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Bit 7: transceiver temperature high alarm (2-bit debounced). */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
			      &xstats->xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		xstats->alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high alarm. */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
			      &xstats->xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		xstats->alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high alarm. */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
			      &xstats->xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		xstats->alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Warnings are only counted, not debounced like the alarms above. */
	if (CHECKBIT(val64, 0x7))
		xstats->warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		xstats->warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		xstats->warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		xstats->warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		xstats->warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		xstats->warn_laser_output_power_low++;
}
3322
3323/**
3324 *  wait_for_cmd_complete - waits for a command to complete.
3325 *  @addr: address
3326 *  @busy_bit: bit to check for busy
3327 *  @bit_state: state to check
3328 *  @may_sleep: parameter indicates if sleeping when waiting for
3329 *  command complete
3330 *  Description: Function that waits for a command to Write into RMAC
3331 *  ADDR DATA registers to be completed and returns either success or
3332 *  error depending on whether the command was complete or not.
3333 *  Return value:
3334 *   SUCCESS on success and FAILURE on failure.
3335 */
3336
3337static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3338                                 int bit_state, bool may_sleep)
3339{
3340        int ret = FAILURE, cnt = 0, delay = 1;
3341        u64 val64;
3342
3343        if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3344                return FAILURE;
3345
3346        do {
3347                val64 = readq(addr);
3348                if (bit_state == S2IO_BIT_RESET) {
3349                        if (!(val64 & busy_bit)) {
3350                                ret = SUCCESS;
3351                                break;
3352                        }
3353                } else {
3354                        if (val64 & busy_bit) {
3355                                ret = SUCCESS;
3356                                break;
3357                        }
3358                }
3359
3360                if (!may_sleep)
3361                        mdelay(delay);
3362                else
3363                        msleep(delay);
3364
3365                if (++cnt >= 10)
3366                        delay = 50;
3367        } while (cnt < 20);
3368        return ret;
3369}
3370/**
3371 * check_pci_device_id - Checks if the device id is supported
3372 * @id : device id
3373 * Description: Function to check if the pci device id is supported by driver.
3374 * Return value: Actual device id if supported else PCI_ANY_ID
3375 */
3376static u16 check_pci_device_id(u16 id)
3377{
3378        switch (id) {
3379        case PCI_DEVICE_ID_HERC_WIN:
3380        case PCI_DEVICE_ID_HERC_UNI:
3381                return XFRAME_II_DEVICE;
3382        case PCI_DEVICE_ID_S2IO_UNI:
3383        case PCI_DEVICE_ID_S2IO_WIN:
3384                return XFRAME_I_DEVICE;
3385        default:
3386                return PCI_ANY_ID;
3387        }
3388}
3389
3390/**
3391 *  s2io_reset - Resets the card.
3392 *  @sp : private member of the device structure.
3393 *  Description: Function to Reset the card. This function then also
3394 *  restores the previously saved PCI configuration space registers as
3395 *  the card reset also resets the configuration space.
3396 *  Return value:
3397 *  void.
3398 */
3399
static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Trigger the software reset; CX4 boards need extra settle time. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	/*
	 * Poll until config space is readable again: a recognised device ID
	 * at offset 0x2 means the card has come back from reset.
	 */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	/* Put back the PCI-X command register saved above. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	/* Wipe the whole stats block, then put back the counters that
	 * must survive a reset. */
	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Write-1-to-clear: read the latched bits and write them back. */
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3512
3513/**
 *  s2io_set_swapper - to set the swapper control on the card
3515 *  @sp : private member of the device structure,
3516 *  pointer to the s2io_nic structure.
3517 *  Description: Function to set the swapper control on the card
3518 *  correctly depending on the 'endianness' of the system.
3519 *  Return value:
3520 *  SUCCESS on success and FAILURE on failure.
3521 */
3522
static int s2io_set_swapper(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/*
		 * Feedback pattern is wrong: probe the four read-path
		 * swapper settings until the pattern reads back correctly.
		 */
		int i = 0;
		static const u64 value[] = {
			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
			0x8100008181000081ULL,	/* FE=1, SE=0 */
			0x4200004242000042ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
				  "feedback read %llx\n",
				  dev->name, (unsigned long long)val64);
			return FAILURE;
		}
		/* Remember the working read-path setting for the write probe. */
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Verify the write path: write a known pattern and read it back. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		/* Probe the four write-path settings, keeping valr intact. */
		int i = 0;
		static const u64 value[] = {
			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
			0x0081810000818100ULL,	/* FE=1, SE=0 */
			0x0042420000424200ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG,
				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the top 16 control bits; the rest is set per endianness. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG,
			  "%s: Endian settings are wrong, feedback read %llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	return SUCCESS;
}
3654
3655static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3656{
3657        struct XENA_dev_config __iomem *bar0 = nic->bar0;
3658        u64 val64;
3659        int ret = 0, cnt = 0;
3660
3661        do {
3662                val64 = readq(&bar0->xmsi_access);
3663                if (!(val64 & s2BIT(15)))
3664                        break;
3665                mdelay(1);
3666                cnt++;
3667        } while (cnt < 5);
3668        if (cnt == 5) {
3669                DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3670                ret = 1;
3671        }
3672
3673        return ret;
3674}
3675
3676static void restore_xmsi_data(struct s2io_nic *nic)
3677{
3678        struct XENA_dev_config __iomem *bar0 = nic->bar0;
3679        u64 val64;
3680        int i, msix_index;
3681
3682        if (nic->device_type == XFRAME_I_DEVICE)
3683                return;
3684
3685        for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3686                msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3687                writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3688                writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3689                val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3690                writeq(val64, &bar0->xmsi_access);
3691                if (wait_for_msix_trans(nic, msix_index))
3692                        DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3693                                  __func__, msix_index);
3694        }
3695}
3696
/*
 * store_xmsi_data - capture the NIC's MSI-X address/data pairs.
 * @nic: device private structure
 *
 * Reads each programmed XMSI table entry out of the adapter and caches
 * it in nic->msix_info[] so restore_xmsi_data() can re-program the
 * hardware after a reset.  No-op on Xframe-I (no MSI-X support).
 */
static void store_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i, msix_index;

	/* Xframe-I has no XMSI table to capture. */
	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	/* Store and display */
	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Same vector layout as s2io_enable_msi_x(): alarm at 0,
		 * ring vectors at 1, 9, 17, ... */
		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
		/* Start a read transaction (no write-strobe bit here,
		 * unlike restore_xmsi_data()). */
		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
				  __func__, msix_index);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		/* Only cache entries that were actually programmed. */
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
3724
/*
 * s2io_enable_msi_x - allocate and enable MSI-X vectors for the NIC.
 * @nic: device private structure
 *
 * Builds the msix_entry / s2io_msix_entry tables (entry 0 = alarm/Tx
 * vector, one vector per Rx ring spaced 8 apart), programs the Rx
 * steering register, and enables MSI-X via pci_enable_msix_range().
 * Allocation bookkeeping is mirrored into the driver's software stats.
 *
 * Returns 0 on success, -ENOMEM on allocation or vector-enable failure.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		kfree(nic->entries);
		swstats->mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Vector 0 handles alarms and Tx traffic for all FIFOs. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining vectors use hardware indices 1, 9, 17, ... — the same
	 * spacing assumed by store_xmsi_data()/restore_xmsi_data(). */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each Rx ring's interrupts to its own MSI-X vector. */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat);	/* flush the posted write */

	ret = pci_enable_msix_range(nic->pdev, nic->entries,
				    nic->num_entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret < 0) {
		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
		kfree(nic->entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct msix_entry);
		kfree(nic->s2io_entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct s2io_msix_entry);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3808
3809/* Handle software interrupt used during MSI(X) test */
3810static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3811{
3812        struct s2io_nic *sp = dev_id;
3813
3814        sp->msi_detected = 1;
3815        wake_up(&sp->msi_wait);
3816
3817        return IRQ_HANDLED;
3818}
3819
/* Test the MSI(X) interrupt path by forcing a software IRQ.
 * Requests the test vector, triggers a one-shot scheduled interrupt
 * routed to MSI, and waits up to HZ/10 for s2io_test_intr() to fire.
 * Returns 0 if the interrupt was observed, -EOPNOTSUPP if not, or the
 * request_irq() error. The scheduled_int_ctrl register is restored on
 * exit. */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/* Save the register so it can be restored after the test, then
	 * arm a one-shot timer interrupt steered to MSI vector 1. */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n",
			  sp->dev->name, pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3862
/*
 * remove_msix_isr - tear down all MSI-X interrupt handlers.
 * @sp: device private structure
 *
 * Frees every IRQ that was successfully registered, releases the
 * vector bookkeeping tables (NULLing the pointers so this is safe
 * against repeated cleanup), undoes the MSI-enable workaround written
 * by s2io_enable_msi_x(), and finally disables MSI-X on the device.
 */
static void remove_msix_isr(struct s2io_nic *sp)
{
	int i;
	u16 msi_control;

	for (i = 0; i < sp->num_entries; i++) {
		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;
			free_irq(vector, arg);
		}
	}

	kfree(sp->entries);
	kfree(sp->s2io_entries);
	sp->entries = NULL;
	sp->s2io_entries = NULL;

	/* Reverse the herc-bug MSI workaround from s2io_enable_msi_x(). */
	pci_read_config_word(sp->pdev, 0x42, &msi_control);
	msi_control &= 0xFFFE; /* Disable MSI */
	pci_write_config_word(sp->pdev, 0x42, msi_control);

	pci_disable_msix(sp->pdev);
}
3887
/* remove_inta_isr - release the legacy INTx interrupt handler. */
static void remove_inta_isr(struct s2io_nic *sp)
{
	free_irq(sp->pdev->irq, sp->dev);
}
3892
3893/* ********************************************************* *
3894 * Functions defined below concern the OS part of the driver *
3895 * ********************************************************* */
3896
3897/**
3898 *  s2io_open - open entry point of the driver
3899 *  @dev : pointer to the device structure.
3900 *  Description:
3901 *  This function is the open entry point of the driver. It mainly calls a
3902 *  function to allocate Rx buffers and inserts them into the buffer
3903 *  descriptors and then enables the Rx part of the NIC.
3904 *  Return value:
3905 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3906 *   file on failure.
3907 */
3908
3909static int s2io_open(struct net_device *dev)
3910{
3911        struct s2io_nic *sp = netdev_priv(dev);
3912        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3913        int err = 0;
3914
3915        /*
3916         * Make sure you have link off by default every time
3917         * Nic is initialized
3918         */
3919        netif_carrier_off(dev);
3920        sp->last_link_state = 0;
3921
3922        /* Initialize H/W and enable interrupts */
3923        err = s2io_card_up(sp);
3924        if (err) {
3925                DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3926                          dev->name);
3927                goto hw_init_failed;
3928        }
3929
3930        if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3931                DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3932                s2io_card_down(sp);
3933                err = -ENODEV;
3934                goto hw_init_failed;
3935        }
3936        s2io_start_all_tx_queue(sp);
3937        return 0;
3938
3939hw_init_failed:
3940        if (sp->config.intr_type == MSI_X) {
3941                if (sp->entries) {
3942                        kfree(sp->entries);
3943                        swstats->mem_freed += sp->num_entries *
3944                                sizeof(struct msix_entry);
3945                }
3946                if (sp->s2io_entries) {
3947                        kfree(sp->s2io_entries);
3948                        swstats->mem_freed += sp->num_entries *
3949                                sizeof(struct s2io_msix_entry);
3950                }
3951        }
3952        return err;
3953}
3954
3955/**
3956 *  s2io_close -close entry point of the driver
3957 *  @dev : device pointer.
3958 *  Description:
3959 *  This is the stop entry point of the driver. It needs to undo exactly
3960 *  whatever was done by the open entry point,thus it's usually referred to
3961 *  as the close function.Among other things this function mainly stops the
3962 *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3963 *  Return value:
3964 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3965 *  file on failure.
3966 */
3967
3968static int s2io_close(struct net_device *dev)
3969{
3970        struct s2io_nic *sp = netdev_priv(dev);
3971        struct config_param *config = &sp->config;
3972        u64 tmp64;
3973        int offset;
3974
3975        /* Return if the device is already closed               *
3976         *  Can happen when s2io_card_up failed in change_mtu    *
3977         */
3978        if (!is_s2io_card_up(sp))
3979                return 0;
3980
3981        s2io_stop_all_tx_queue(sp);
3982        /* delete all populated mac entries */
3983        for (offset = 1; offset < config->max_mc_addr; offset++) {
3984                tmp64 = do_s2io_read_unicast_mc(sp, offset);
3985                if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3986                        do_s2io_delete_unicast_mc(sp, tmp64);
3987        }
3988
3989        s2io_card_down(sp);
3990
3991        return 0;
3992}
3993
3994/**
3995 *  s2io_xmit - Tx entry point of te driver
3996 *  @skb : the socket buffer containing the Tx data.
3997 *  @dev : device pointer.
3998 *  Description :
3999 *  This function is the Tx entry point of the driver. S2IO NIC supports
4000 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
4001 *  NOTE: when device can't queue the pkt,just the trans_start variable will
4002 *  not be upadted.
4003 *  Return value:
4004 *  0 on success & 1 on failure.
4005 */
4006
4007static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4008{
4009        struct s2io_nic *sp = netdev_priv(dev);
4010        u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4011        register u64 val64;
4012        struct TxD *txdp;
4013        struct TxFIFO_element __iomem *tx_fifo;
4014        unsigned long flags = 0;
4015        u16 vlan_tag = 0;
4016        struct fifo_info *fifo = NULL;
4017        int offload_type;
4018        int enable_per_list_interrupt = 0;
4019        struct config_param *config = &sp->config;
4020        struct mac_info *mac_control = &sp->mac_control;
4021        struct stat_block *stats = mac_control->stats_info;
4022        struct swStat *swstats = &stats->sw_stat;
4023
4024        DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4025
4026        if (unlikely(skb->len <= 0)) {
4027                DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4028                dev_kfree_skb_any(skb);
4029                return NETDEV_TX_OK;
4030        }
4031
4032        if (!is_s2io_card_up(sp)) {
4033                DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4034                          dev->name);
4035                dev_kfree_skb_any(skb);
4036                return NETDEV_TX_OK;
4037        }
4038
4039        queue = 0;
4040        if (skb_vlan_tag_present(skb))
4041                vlan_tag = skb_vlan_tag_get(skb);
4042        if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4043                if (skb->protocol == htons(ETH_P_IP)) {
4044                        struct iphdr *ip;
4045                        struct tcphdr *th;
4046                        ip = ip_hdr(skb);
4047
4048                        if (!ip_is_fragment(ip)) {
4049                                th = (struct tcphdr *)(((unsigned char *)ip) +
4050                                                       ip->ihl*4);
4051
4052                                if (ip->protocol == IPPROTO_TCP) {
4053                                        queue_len = sp->total_tcp_fifos;
4054                                        queue = (ntohs(th->source) +
4055                                                 ntohs(th->dest)) &
4056                                                sp->fifo_selector[queue_len - 1];
4057                                        if (queue >= queue_len)
4058                                                queue = queue_len - 1;
4059                                } else if (ip->protocol == IPPROTO_UDP) {
4060                                        queue_len = sp->total_udp_fifos;
4061                                        queue = (ntohs(th->source) +
4062                                                 ntohs(th->dest)) &
4063                                                sp->fifo_selector[queue_len - 1];
4064                                        if (queue >= queue_len)
4065                                                queue = queue_len - 1;
4066                                        queue += sp->udp_fifo_idx;
4067                                        if (skb->len > 1024)
4068                                                enable_per_list_interrupt = 1;
4069                                }
4070                        }
4071                }
4072        } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4073                /* get fifo number based on skb->priority value */
4074                queue = config->fifo_mapping
4075                        [skb->priority & (MAX_TX_FIFOS - 1)];
4076        fifo = &mac_control->fifos[queue];
4077
4078        spin_lock_irqsave(&fifo->tx_lock, flags);
4079
4080        if (sp->config.multiq) {
4081                if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4082                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
4083                        return NETDEV_TX_BUSY;
4084                }
4085        } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4086                if (netif_queue_stopped(dev)) {
4087                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
4088                        return NETDEV_TX_BUSY;
4089                }
4090        }
4091
4092        put_off = (u16)fifo->tx_curr_put_info.offset;
4093        get_off = (u16)fifo->tx_curr_get_info.offset;
4094        txdp = fifo->list_info[put_off].list_virt_addr;
4095
4096        queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4097        /* Avoid "put" pointer going beyond "get" pointer */
4098        if (txdp->Host_Control ||
4099            ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4100                DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4101                s2io_stop_tx_queue(sp, fifo->fifo_no);
4102                dev_kfree_skb_any(skb);
4103                spin_unlock_irqrestore(&fifo->tx_lock, flags);
4104                return NETDEV_TX_OK;
4105        }
4106
4107        offload_type = s2io_offload_type(skb);
4108        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4109                txdp->Control_1 |= TXD_TCP_LSO_EN;
4110                txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4111        }
4112        if (skb->ip_summed == CHECKSUM_PARTIAL) {
4113                txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4114                                    TXD_TX_CKO_TCP_EN |
4115                                    TXD_TX_CKO_UDP_EN);
4116        }
4117        txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4118        txdp->Control_1 |= TXD_LIST_OWN_XENA;
4119        txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4120        if (enable_per_list_interrupt)
4121                if (put_off & (queue_len >> 5))
4122                        txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4123        if (vlan_tag) {
4124                txdp->Control_2 |= TXD_VLAN_ENABLE;
4125                txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4126        }
4127
4128        frg_len = skb_headlen(skb);
4129        txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
4130                                              frg_len, DMA_TO_DEVICE);
4131        if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
4132                goto pci_map_failed;
4133
4134        txdp->Host_Control = (unsigned long)skb;
4135        txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4136
4137        frg_cnt = skb_shinfo(skb)->nr_frags;
4138        /* For fragmented SKB. */
4139        for (i = 0; i < frg_cnt; i++) {
4140                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4141                /* A '0' length fragment will be ignored */
4142                if (!skb_frag_size(frag))
4143                        continue;
4144                txdp++;
4145                txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4146                                                             frag, 0,
4147                                                             skb_frag_size(frag),
4148                                                             DMA_TO_DEVICE);
4149                txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4150        }
4151        txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4152
4153        tx_fifo = mac_control->tx_FIFO_start[queue];
4154        val64 = fifo->list_info[put_off].list_phy_addr;
4155        writeq(val64, &tx_fifo->TxDL_Pointer);
4156
4157        val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4158                 TX_FIFO_LAST_LIST);
4159        if (offload_type)
4160                val64 |= TX_FIFO_SPECIAL_FUNC;
4161
4162        writeq(val64, &tx_fifo->List_Control);
4163
4164        put_off++;
4165        if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4166                put_off = 0;
4167        fifo->tx_curr_put_info.offset = put_off;
4168
4169        /* Avoid "put" pointer going beyond "get" pointer */
4170        if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4171                swstats->fifo_full_cnt++;
4172                DBG_PRINT(TX_DBG,
4173                          "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4174                          put_off, get_off);
4175                s2io_stop_tx_queue(sp, fifo->fifo_no);
4176        }
4177        swstats->mem_allocated += skb->truesize;
4178        spin_unlock_irqrestore(&fifo->tx_lock, flags);
4179
4180        if (sp->config.intr_type == MSI_X)
4181                tx_intr_handler(fifo);
4182
4183        return NETDEV_TX_OK;
4184
4185pci_map_failed:
4186        swstats->pci_map_fail_cnt++;
4187        s2io_stop_tx_queue(sp, fifo->fifo_no);
4188        swstats->mem_freed += skb->truesize;
4189        dev_kfree_skb_any(skb);
4190        spin_unlock_irqrestore(&fifo->tx_lock, flags);
4191        return NETDEV_TX_OK;
4192}
4193
/*
 * s2io_alarm_handle - periodic alarm timer callback.
 * @t: timer that fired (embedded in struct s2io_nic)
 *
 * Polls the device for error/alarm conditions via s2io_handle_errors()
 * and re-arms itself to run again in half a second.
 */
static void
s2io_alarm_handle(struct timer_list *t)
{
	struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
	struct net_device *dev = sp->dev;

	s2io_handle_errors(dev);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}
4203
/*
 * s2io_msix_ring_handle - MSI-X interrupt handler for one Rx ring.
 * @irq: interrupt number (unused)
 * @dev_id: the ring_info this vector was registered with
 *
 * In NAPI mode, masks this ring's interrupt byte in xmsi_mask_reg and
 * schedules the ring's NAPI poll; otherwise processes Rx completions
 * and replenishes buffers directly in interrupt context.
 */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Spurious/raced interrupt while the card is down: ignore. */
	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/* Mask this ring's vector until the NAPI poll re-enables it.
		 * NOTE(review): the byte offset (7 - ring_no) and the 0x7f
		 * special-case for ring 0 encode the per-ring mask layout of
		 * xmsi_mask_reg — confirm against the Xframe register spec. */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		val8 = readb(addr);	/* flush the posted write */
		napi_schedule(&ring->napi);
	} else {
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(sp, ring);
	}

	return IRQ_HANDLED;
}
4230
/*
 * s2io_msix_fifo_handle - MSI-X handler for the alarm/Tx vector.
 * @irq: interrupt number (unused)
 * @dev_id: pointer to the first element of the fifo_info array
 *
 * Services PIC alarms and Tx-traffic completions for all FIFOs.
 * Interrupts are masked for the duration of processing and the saved
 * mask is restored before returning.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config  = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	/* All-ones usually means the device is gone (e.g. hot-unplug). */
	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask everything while we process, then restore below. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);	/* flush */
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
4267
/*
 * s2io_txpic_intr_handle - service TXPIC/GPIO interrupts (link events).
 * @sp: device private structure
 *
 * Decodes the GPIO interrupt register for link up/down transitions,
 * updates adapter control (enable + LED) and carrier state via
 * s2io_link(), and re-programs the GPIO interrupt mask so that only
 * the opposite transition remains unmasked.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Read-back to flush the posted mask writes. */
	val64 = readq(&bar0->gpio_int_mask);
}
4327
4328/**
4329 *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4330 *  @value: alarm bits
4331 *  @addr: address value
4332 *  @cnt: counter variable
4333 *  Description: Check for alarm and increment the counter
4334 *  Return Value:
4335 *  1 - if alarm bit set
4336 *  0 - if alarm bit is not set
4337 */
4338static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4339                                 unsigned long long *cnt)
4340{
4341        u64 val64;
4342        val64 = readq(addr);
4343        if (val64 & value) {
4344                writeq(val64, addr);
4345                (*cnt)++;
4346                return 1;
4347        }
4348        return 0;
4349
4350}
4351
4352/**
4353 *  s2io_handle_errors - Xframe error indication handler
4354 *  @dev_id: opaque handle to dev
4355 *  Description: Handle alarms such as loss of link, single or
4356 *  double ECC errors, critical and serious errors.
4357 *  Return Value:
4358 *  NONE
4359 */
4360static void s2io_handle_errors(void *dev_id)
4361{
4362        struct net_device *dev = (struct net_device *)dev_id;
4363        struct s2io_nic *sp = netdev_priv(dev);
4364        struct XENA_dev_config __iomem *bar0 = sp->bar0;
4365        u64 temp64 = 0, val64 = 0;
4366        int i = 0;
4367
4368        struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4369        struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4370
4371        if (!is_s2io_card_up(sp))
4372                return;
4373
4374        if (pci_channel_offline(sp->pdev))
4375                return;
4376
4377        memset(&sw_stat->ring_full_cnt, 0,
4378               sizeof(sw_stat->ring_full_cnt));
4379
4380        /* Handling the XPAK counters update */
4381        if (stats->xpak_timer_count < 72000) {
4382                /* waiting for an hour */
4383                stats->xpak_timer_count++;
4384        } else {
4385                s2io_updt_xpak_counter(dev);
4386                /* reset the count to zero */
4387                stats->xpak_timer_count = 0;
4388        }
4389
4390        /* Handling link status change error Intr */
4391        if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4392                val64 = readq(&bar0->mac_rmac_err_reg);
4393                writeq(val64, &bar0->mac_rmac_err_reg);
4394                if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4395                        schedule_work(&sp->set_link_task);
4396        }
4397
4398        /* In case of a serious error, the device will be Reset. */
4399        if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4400                                  &sw_stat->serious_err_cnt))
4401                goto reset;
4402
4403        /* Check for data parity error */
4404        if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4405                                  &sw_stat->parity_err_cnt))
4406                goto reset;
4407
4408        /* Check for ring full counter */
4409        if (sp->device_type == XFRAME_II_DEVICE) {
4410                val64 = readq(&bar0->ring_bump_counter1);
4411                for (i = 0; i < 4; i++) {
4412                        temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4413                        temp64 >>= 64 - ((i+1)*16);
4414                        sw_stat->ring_full_cnt[i] += temp64;
4415                }
4416
4417                val64 = readq(&bar0->ring_bump_counter2);
4418                for (i = 0; i < 4; i++) {
4419                        temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4420                        temp64 >>= 64 - ((i+1)*16);
4421                        sw_stat->ring_full_cnt[i+4] += temp64;
4422                }
4423        }
4424
4425        val64 = readq(&bar0->txdma_int_status);
4426        /*check for pfc_err*/
4427        if (val64 & TXDMA_PFC_INT) {
4428                if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4429                                          PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4430                                          PFC_PCIX_ERR,
4431                                          &bar0->pfc_err_reg,
4432                                          &sw_stat->pfc_err_cnt))
4433                        goto reset;
4434                do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4435                                      &bar0->pfc_err_reg,
4436                                      &sw_stat->pfc_err_cnt);
4437        }
4438
4439        /*check for tda_err*/
4440        if (val64 & TXDMA_TDA_INT) {
4441                if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4442                                          TDA_SM0_ERR_ALARM |
4443                                          TDA_SM1_ERR_ALARM,
4444                                          &bar0->tda_err_reg,
4445                                          &sw_stat->tda_err_cnt))
4446                        goto reset;
4447                do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4448                                      &bar0->tda_err_reg,
4449                                      &sw_stat->tda_err_cnt);
4450        }
4451        /*check for pcc_err*/
4452        if (val64 & TXDMA_PCC_INT) {
4453                if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4454                                          PCC_N_SERR | PCC_6_COF_OV_ERR |
4455                                          PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4456                                          PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4457                                          PCC_TXB_ECC_DB_ERR,
4458                                          &bar0->pcc_err_reg,
4459                                          &sw_stat->pcc_err_cnt))
4460                        goto reset;
4461                do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4462                                      &bar0->pcc_err_reg,
4463                                      &sw_stat->pcc_err_cnt);
4464        }
4465
4466        /*check for tti_err*/
4467        if (val64 & TXDMA_TTI_INT) {
4468                if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4469                                          &bar0->tti_err_reg,
4470                                          &sw_stat->tti_err_cnt))
4471                        goto reset;
4472                do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4473                                      &bar0->tti_err_reg,
4474                                      &sw_stat->tti_err_cnt);
4475        }
4476
4477        /*check for lso_err*/
4478        if (val64 & TXDMA_LSO_INT) {
4479                if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4480                                          LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4481                                          &bar0->lso_err_reg,
4482                                          &sw_stat->lso_err_cnt))
4483                        goto reset;
4484                do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4485                                      &bar0->lso_err_reg,
4486                                      &sw_stat->lso_err_cnt);
4487        }
4488
4489        /*check for tpa_err*/
4490        if (val64 & TXDMA_TPA_INT) {
4491                if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4492                                          &bar0->tpa_err_reg,
4493                                          &sw_stat->tpa_err_cnt))
4494                        goto reset;
4495                do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4496                                      &bar0->tpa_err_reg,
4497                                      &sw_stat->tpa_err_cnt);
4498        }
4499
4500        /*check for sm_err*/
4501        if (val64 & TXDMA_SM_INT) {
4502                if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4503                                          &bar0->sm_err_reg,
4504                                          &sw_stat->sm_err_cnt))
4505                        goto reset;
4506        }
4507
4508        val64 = readq(&bar0->mac_int_status);
4509        if (val64 & MAC_INT_STATUS_TMAC_INT) {
4510                if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4511                                          &bar0->mac_tmac_err_reg,
4512                                          &sw_stat->mac_tmac_err_cnt))
4513                        goto reset;
4514                do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4515                                      TMAC_DESC_ECC_SG_ERR |
4516                                      TMAC_DESC_ECC_DB_ERR,
4517                                      &bar0->mac_tmac_err_reg,
4518                                      &sw_stat->mac_tmac_err_cnt);
4519        }
4520
4521        val64 = readq(&bar0->xgxs_int_status);
4522        if (val64 & XGXS_INT_STATUS_TXGXS) {
4523                if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4524                                          &bar0->xgxs_txgxs_err_reg,
4525                                          &sw_stat->xgxs_txgxs_err_cnt))
4526                        goto reset;
4527                do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4528                                      &bar0->xgxs_txgxs_err_reg,
4529                                      &sw_stat->xgxs_txgxs_err_cnt);
4530        }
4531
4532        val64 = readq(&bar0->rxdma_int_status);
4533        if (val64 & RXDMA_INT_RC_INT_M) {
4534                if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4535                                          RC_FTC_ECC_DB_ERR |
4536                                          RC_PRCn_SM_ERR_ALARM |
4537                                          RC_FTC_SM_ERR_ALARM,
4538                                          &bar0->rc_err_reg,
4539                                          &sw_stat->rc_err_cnt))
4540                        goto reset;
4541                do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4542                                      RC_FTC_ECC_SG_ERR |
4543                                      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4544                                      &sw_stat->rc_err_cnt);
4545                if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4546                                          PRC_PCI_AB_WR_Rn |
4547                                          PRC_PCI_AB_F_WR_Rn,
4548                                          &bar0->prc_pcix_err_reg,
4549                                          &sw_stat->prc_pcix_err_cnt))
4550                        goto reset;
4551                do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4552                                      PRC_PCI_DP_WR_Rn |
4553                                      PRC_PCI_DP_F_WR_Rn,
4554                                      &bar0->prc_pcix_err_reg,
4555                                      &sw_stat->prc_pcix_err_cnt);
4556        }
4557
4558        if (val64 & RXDMA_INT_RPA_INT_M) {
4559                if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4560                                          &bar0->rpa_err_reg,
4561                                          &sw_stat->rpa_err_cnt))
4562                        goto reset;
4563                do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4564                                      &bar0->rpa_err_reg,
4565                                      &sw_stat->rpa_err_cnt);
4566        }
4567
4568        if (val64 & RXDMA_INT_RDA_INT_M) {
4569                if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4570                                          RDA_FRM_ECC_DB_N_AERR |
4571                                          RDA_SM1_ERR_ALARM |
4572                                          RDA_SM0_ERR_ALARM |
4573                                          RDA_RXD_ECC_DB_SERR,
4574                                          &bar0->rda_err_reg,
4575                                          &sw_stat->rda_err_cnt))
4576                        goto reset;
4577                do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4578                                      RDA_FRM_ECC_SG_ERR |
4579                                      RDA_MISC_ERR |
4580                                      RDA_PCIX_ERR,
4581                                      &bar0->rda_err_reg,
4582                                      &sw_stat->rda_err_cnt);
4583        }
4584
4585        if (val64 & RXDMA_INT_RTI_INT_M) {
4586                if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4587                                          &bar0->rti_err_reg,
4588                                          &sw_stat->rti_err_cnt))
4589                        goto reset;
4590                do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4591                                      &bar0->rti_err_reg,
4592                                      &sw_stat->rti_err_cnt);
4593        }
4594
4595        val64 = readq(&bar0->mac_int_status);
4596        if (val64 & MAC_INT_STATUS_RMAC_INT) {
4597                if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4598                                          &bar0->mac_rmac_err_reg,
4599                                          &sw_stat->mac_rmac_err_cnt))
4600                        goto reset;
4601                do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4602                                      RMAC_SINGLE_ECC_ERR |
4603                                      RMAC_DOUBLE_ECC_ERR,
4604                                      &bar0->mac_rmac_err_reg,
4605                                      &sw_stat->mac_rmac_err_cnt);
4606        }
4607
4608        val64 = readq(&bar0->xgxs_int_status);
4609        if (val64 & XGXS_INT_STATUS_RXGXS) {
4610                if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4611                                          &bar0->xgxs_rxgxs_err_reg,
4612                                          &sw_stat->xgxs_rxgxs_err_cnt))
4613                        goto reset;
4614        }
4615
4616        val64 = readq(&bar0->mc_int_status);
4617        if (val64 & MC_INT_STATUS_MC_INT) {
4618                if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4619                                          &bar0->mc_err_reg,
4620                                          &sw_stat->mc_err_cnt))
4621                        goto reset;
4622
4623                /* Handling Ecc errors */
4624                if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4625                        writeq(val64, &bar0->mc_err_reg);
4626                        if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4627                                sw_stat->double_ecc_errs++;
4628                                if (sp->device_type != XFRAME_II_DEVICE) {
4629                                        /*
4630                                         * Reset XframeI only if critical error
4631                                         */
4632                                        if (val64 &
4633                                            (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4634                                             MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4635                                                goto reset;
4636                                }
4637                        } else
4638                                sw_stat->single_ecc_errs++;
4639                }
4640        }
4641        return;
4642
4643reset:
4644        s2io_stop_all_tx_queue(sp);
4645        schedule_work(&sp->rst_timer_task);
4646        sw_stat->soft_reset_cnt++;
4647}
4648
/**
 *  s2io_isr - ISR handler of the device.
 *  @irq: the irq of the device.
 *  @dev_id: a void pointer to the dev structure of the NIC.
 *  Description:  This function is the ISR handler of the device. It
 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
 *  recv buffers, if their numbers are below the panic value which is
 *  presently set to 25% of the original number of rcv buffers allocated.
 *  Return value:
 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
 *   IRQ_NONE: will be returned if interrupt is not from our device
 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct s2io_nic *sp = netdev_priv(dev);
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int i;
        u64 reason = 0;
        struct mac_info *mac_control;
        struct config_param *config;

        /* Pretend we handled any irq's from a disconnected card */
        if (pci_channel_offline(sp->pdev))
                return IRQ_NONE;

        if (!is_s2io_card_up(sp))
                return IRQ_NONE;

        config = &sp->config;
        mac_control = &sp->mac_control;

        /*
         * Identify the cause for interrupt and call the appropriate
         * interrupt handler. Causes for the interrupt could be;
         * 1. Rx of packet.
         * 2. Tx complete.
         * 3. Link down.
         */
        reason = readq(&bar0->general_int_status);

        /* An all-ones readback typically means the adapter is no longer
         * responding (e.g. surprise removal) - NOTE(review): confirm
         * against the Xframe hardware spec.
         */
        if (unlikely(reason == S2IO_MINUS_ONE))
                return IRQ_HANDLED;     /* Nothing much can be done. Get out */

        if (reason &
            (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
                /* Mask all interrupt sources while servicing this one;
                 * sp->general_int_mask is restored before returning below.
                 */
                writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

                if (config->napi) {
                        if (reason & GEN_INTR_RXTRAFFIC) {
                                /* Hand Rx work to the NAPI poll loop; mask and
                                 * ack Rx, then read back to flush the posted
                                 * writes before the hard irq returns.
                                 */
                                napi_schedule(&sp->napi);
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
                                readl(&bar0->rx_traffic_int);
                        }
                } else {
                        /*
                         * rx_traffic_int reg is an R1 register, writing all 1's
                         * will ensure that the actual interrupt causing bit
                         * get's cleared and hence a read can be avoided.
                         */
                        if (reason & GEN_INTR_RXTRAFFIC)
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

                        /* Non-NAPI mode: drain every Rx ring inline */
                        for (i = 0; i < config->rx_ring_num; i++) {
                                struct ring_info *ring = &mac_control->rings[i];

                                rx_intr_handler(ring, 0);
                        }
                }

                /*
                 * tx_traffic_int reg is an R1 register, writing all 1's
                 * will ensure that the actual interrupt causing bit get's
                 * cleared and hence a read can be avoided.
                 */
                if (reason & GEN_INTR_TXTRAFFIC)
                        writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

                /* Reap Tx completions on every fifo */
                for (i = 0; i < config->tx_fifo_num; i++)
                        tx_intr_handler(&mac_control->fifos[i]);

                if (reason & GEN_INTR_TXPIC)
                        s2io_txpic_intr_handle(sp);

                /*
                 * Reallocate the buffers from the interrupt handler itself.
                 */
                if (!config->napi) {
                        for (i = 0; i < config->rx_ring_num; i++) {
                                struct ring_info *ring = &mac_control->rings[i];

                                s2io_chk_rx_buffers(sp, ring);
                        }
                }
                /* Restore the interrupt mask and flush the posted write */
                writeq(sp->general_int_mask, &bar0->general_int_mask);
                readl(&bar0->general_int_status);

                return IRQ_HANDLED;

        } else if (!reason) {
                /* The interrupt was not raised by us */
                return IRQ_NONE;
        }

        return IRQ_HANDLED;
}
4757
4758/*
4759 * s2io_updt_stats -
4760 */
4761static void s2io_updt_stats(struct s2io_nic *sp)
4762{
4763        struct XENA_dev_config __iomem *bar0 = sp->bar0;
4764        u64 val64;
4765        int cnt = 0;
4766
4767        if (is_s2io_card_up(sp)) {
4768                /* Apprx 30us on a 133 MHz bus */
4769                val64 = SET_UPDT_CLICKS(10) |
4770                        STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4771                writeq(val64, &bar0->stat_cfg);
4772                do {
4773                        udelay(100);
4774                        val64 = readq(&bar0->stat_cfg);
4775                        if (!(val64 & s2BIT(0)))
4776                                break;
4777                        cnt++;
4778                        if (cnt == 5)
4779                                break; /* Updt failed */
4780                } while (1);
4781        }
4782}
4783
/**
 *  s2io_get_stats - Updates the device statistics structure.
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function updates the device statistics structure in the s2io_nic
 *  structure and returns a pointer to the same.  Each counter is advanced
 *  by the delta between the adapter's current value and the last value
 *  the driver mirrored, so adapter-side resets do not zero the system's
 *  view of the statistics.
 *  Return value:
 *  pointer to the updated net_device_stats structure.
 */
static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
        struct s2io_nic *sp = netdev_priv(dev);
        struct mac_info *mac_control = &sp->mac_control;
        struct stat_block *stats = mac_control->stats_info;
        u64 delta;

        /* Configure Stats for immediate updt */
        s2io_updt_stats(sp);

        /* A device reset will cause the on-adapter statistics to be zero'ed.
         * This can be done while running by changing the MTU.  To prevent the
         * system from having the stats zero'ed, the driver keeps a copy of the
         * last update to the system (which is also zero'ed on reset).  This
         * enables the driver to accurately know the delta between the last
         * update and the current update.
         */
        /* Several counters are kept by the hardware as a 32-bit value plus a
         * 32-bit overflow register; the two halves are combined into a u64
         * before the delta is taken.
         */
        delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
                le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
        sp->stats.rx_packets += delta;
        dev->stats.rx_packets += delta;

        delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
                le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
        sp->stats.tx_packets += delta;
        dev->stats.tx_packets += delta;

        delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
                le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
        sp->stats.rx_bytes += delta;
        dev->stats.rx_bytes += delta;

        delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
                le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
        sp->stats.tx_bytes += delta;
        dev->stats.tx_bytes += delta;

        /* NOTE(review): rx_errors and rx_dropped (below) are both derived
         * from the same hardware counter, rmac_drop_frms - verify this is
         * intentional against the adapter's statistics documentation.
         */
        delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
        sp->stats.rx_errors += delta;
        dev->stats.rx_errors += delta;

        delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
                le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
        sp->stats.tx_errors += delta;
        dev->stats.tx_errors += delta;

        delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
        sp->stats.rx_dropped += delta;
        dev->stats.rx_dropped += delta;

        delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
        sp->stats.tx_dropped += delta;
        dev->stats.tx_dropped += delta;

        /* The adapter MAC interprets pause frames as multicast packets, but
         * does not pass them up.  This erroneously increases the multicast
         * packet count and needs to be deducted when the multicast frame count
         * is queried.
         */
        delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
                le32_to_cpu(stats->rmac_vld_mcst_frms);
        delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
        delta -= sp->stats.multicast;
        sp->stats.multicast += delta;
        dev->stats.multicast += delta;

        /* Length errors combine undersized and oversized frame counts */
        delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
                le32_to_cpu(stats->rmac_usized_frms)) +
                le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
        sp->stats.rx_length_errors += delta;
        dev->stats.rx_length_errors += delta;

        delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
        sp->stats.rx_crc_errors += delta;
        dev->stats.rx_crc_errors += delta;

        return &dev->stats;
}
4871
4872/**
4873 *  s2io_set_multicast - entry point for multicast address enable/disable.
4874 *  @dev : pointer to the device structure
4875 *  @may_sleep: parameter indicates if sleeping when waiting for command
4876 *  complete
4877 *  Description:
4878 *  This function is a driver entry point which gets called by the kernel
4879 *  whenever multicast addresses must be enabled/disabled. This also gets
4880 *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4881 *  determine, if multicast address must be enabled or if promiscuous mode
4882 *  is to be disabled etc.
4883 *  Return value:
4884 *  void.
4885 */
4886static void s2io_set_multicast(struct net_device *dev, bool may_sleep)
4887{
4888        int i, j, prev_cnt;
4889        struct netdev_hw_addr *ha;
4890        struct s2io_nic *sp = netdev_priv(dev);
4891        struct XENA_dev_config __iomem *bar0 = sp->bar0;
4892        u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4893                0xfeffffffffffULL;
4894        u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4895        void __iomem *add;
4896        struct config_param *config = &sp->config;
4897
4898        if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4899                /*  Enable all Multicast addresses */
4900                writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4901                       &bar0->rmac_addr_data0_mem);
4902                writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4903                       &bar0->rmac_addr_data1_mem);
4904                val64 = RMAC_ADDR_CMD_MEM_WE |
4905                        RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4906                        RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4907                writeq(val64, &bar0->rmac_addr_cmd_mem);
4908                /* Wait till command completes */
4909                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4910                                      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4911                                      S2IO_BIT_RESET, may_sleep);
4912
4913                sp->m_cast_flg = 1;
4914                sp->all_multi_pos = config->max_mc_addr - 1;
4915        } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4916                /*  Disable all Multicast addresses */
4917                writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4918                       &bar0->rmac_addr_data0_mem);
4919                writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4920                       &bar0->rmac_addr_data1_mem);
4921                val64 = RMAC_ADDR_CMD_MEM_WE |
4922                        RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4923                        RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4924                writeq(val64, &bar0->rmac_addr_cmd_mem);
4925                /* Wait till command completes */
4926                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4927                                      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4928                                      S2IO_BIT_RESET, may_sleep);
4929
4930                sp->m_cast_flg = 0;
4931                sp->all_multi_pos = 0;
4932        }
4933
4934        if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4935                /*  Put the NIC into promiscuous mode */
4936                add = &bar0->mac_cfg;
4937                val64 = readq(&bar0->mac_cfg);
4938                val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4939
4940                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4941                writel((u32)val64, add);
4942                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4943                writel((u32) (val64 >> 32), (add + 4));
4944
4945                if (vlan_tag_strip != 1) {
4946                        val64 = readq(&bar0->rx_pa_cfg);
4947                        val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4948                        writeq(val64, &bar0->rx_pa_cfg);
4949                        sp->vlan_strip_flag = 0;
4950                }
4951
4952                val64 = readq(&bar0->mac_cfg);
4953                sp->promisc_flg = 1;
4954                DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4955                          dev->name);
4956        } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4957                /*  Remove the NIC from promiscuous mode */
4958                add = &bar0->mac_cfg;
4959                val64 = readq(&bar0->mac_cfg);
4960                val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4961
4962                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4963                writel((u32)val64, add);
4964                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4965                writel((u32) (val64 >> 32), (add + 4));
4966
4967                if (vlan_tag_strip != 0) {
4968                        val64 = readq(&bar0->rx_pa_cfg);
4969                        val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4970                        writeq(val64, &bar0->rx_pa_cfg);
4971                        sp->vlan_strip_flag = 1;
4972                }
4973
4974                val64 = readq(&bar0->mac_cfg);
4975                sp->promisc_flg = 0;
4976                DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4977        }
4978
4979        /*  Update individual M_CAST address list */
4980        if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4981                if (netdev_mc_count(dev) >
4982                    (config->max_mc_addr - config->max_mac_addr)) {
4983                        DBG_PRINT(ERR_DBG,
4984                                  "%s: No more Rx filters can be added - "
4985                                  "please enable ALL_MULTI instead\n",
4986                                  dev->name);
4987                        return;
4988                }
4989
4990                prev_cnt = sp->mc_addr_count;
4991                sp->mc_addr_count = netdev_mc_count(dev);
4992
4993                /* Clear out the previous list of Mc in the H/W. */
4994                for (i = 0; i < prev_cnt; i++) {
4995                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4996                               &bar0->rmac_addr_data0_mem);
4997                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4998                               &bar0->rmac_addr_data1_mem);
4999                        val64 = RMAC_ADDR_CMD_MEM_WE |
5000                                RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5001                                RMAC_ADDR_CMD_MEM_OFFSET
5002                                (config->mc_start_offset + i);
5003                        writeq(val64, &bar0->rmac_addr_cmd_mem);
5004
5005                        /* Wait for command completes */
5006                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5007                                                  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5008                                                  S2IO_BIT_RESET, may_sleep)) {
5009                                DBG_PRINT(ERR_DBG,
5010                                          "%s: Adding Multicasts failed\n",
5011                                          dev->name);
5012                                return;
5013                        }
5014                }
5015
5016                /* Create the new Rx filter list and update the same in H/W. */
5017                i = 0;
5018                netdev_for_each_mc_addr(ha, dev) {
5019                        mac_addr = 0;
5020                        for (j = 0; j < ETH_ALEN; j++) {
5021                                mac_addr |= ha->addr[j];
5022                                mac_addr <<= 8;
5023                        }
5024                        mac_addr >>= 8;
5025                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5026                               &bar0->rmac_addr_data0_mem);
5027                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5028                               &bar0->rmac_addr_data1_mem);
5029                        val64 = RMAC_ADDR_CMD_MEM_WE |
5030                                RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5031                                RMAC_ADDR_CMD_MEM_OFFSET
5032                                (i + config->mc_start_offset);
5033                        writeq(val64, &bar0->rmac_addr_cmd_mem);
5034
5035                        /* Wait for command completes */
5036                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5037                                                  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5038                                                  S2IO_BIT_RESET, may_sleep)) {
5039                                DBG_PRINT(ERR_DBG,
5040                                          "%s: Adding Multicasts failed\n",
5041                                          dev->name);
5042                                return;
5043                        }
5044                        i++;
5045                }
5046        }
5047}
5048
/* NDO wrapper for s2io_set_multicast.
 * Passes may_sleep = false so command completion is polled without
 * sleeping (NOTE(review): presumably because .ndo_set_rx_mode may run
 * in atomic context - confirm against the netdev core callers).
 */
static void s2io_ndo_set_multicast(struct net_device *dev)
{
        s2io_set_multicast(dev, false);
}
5054
5055/* read from CAM unicast & multicast addresses and store it in
5056 * def_mac_addr structure
5057 */
5058static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5059{
5060        int offset;
5061        u64 mac_addr = 0x0;
5062        struct config_param *config = &sp->config;
5063
5064        /* store unicast & multicast mac addresses */
5065        for (offset = 0; offset < config->max_mc_addr; offset++) {
5066                mac_addr = do_s2io_read_unicast_mc(sp, offset);
5067                /* if read fails disable the entry */
5068                if (mac_addr == FAILURE)
5069                        mac_addr = S2IO_DISABLE_MAC_ENTRY;
5070                do_s2io_copy_mac_addr(sp, offset, mac_addr);
5071        }
5072}
5073
5074/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5075static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5076{
5077        int offset;
5078        struct config_param *config = &sp->config;
5079        /* restore unicast mac address */
5080        for (offset = 0; offset < config->max_mac_addr; offset++)
5081                do_s2io_prog_unicast(sp->dev,
5082                                     sp->def_mac_addr[offset].mac_addr);
5083
5084        /* restore multicast mac address */
5085        for (offset = config->mc_start_offset;
5086             offset < config->max_mc_addr; offset++)
5087                do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5088}
5089
5090/* add a multicast MAC address to CAM */
5091static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5092{
5093        int i;
5094        u64 mac_addr = 0;
5095        struct config_param *config = &sp->config;
5096
5097        for (i = 0; i < ETH_ALEN; i++) {
5098                mac_addr <<= 8;
5099                mac_addr |= addr[i];
5100        }
5101        if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5102                return SUCCESS;
5103
5104        /* check if the multicast mac already preset in CAM */
5105        for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5106                u64 tmp64;
5107                tmp64 = do_s2io_read_unicast_mc(sp, i);
5108                if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5109                        break;
5110
5111                if (tmp64 == mac_addr)
5112                        return SUCCESS;
5113        }
5114        if (i == config->max_mc_addr) {
5115                DBG_PRINT(ERR_DBG,
5116                          "CAM full no space left for multicast MAC\n");
5117                return FAILURE;
5118        }
5119        /* Update the internal structure with this new mac address */
5120        do_s2io_copy_mac_addr(sp, i, mac_addr);
5121
5122        return do_s2io_add_mac(sp, mac_addr, i);
5123}
5124
5125/* add MAC address to CAM */
5126static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5127{
5128        u64 val64;
5129        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5130
5131        writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5132               &bar0->rmac_addr_data0_mem);
5133
5134        val64 = RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5135                RMAC_ADDR_CMD_MEM_OFFSET(off);
5136        writeq(val64, &bar0->rmac_addr_cmd_mem);
5137
5138        /* Wait till command completes */
5139        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5140                                  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5141                                  S2IO_BIT_RESET, true)) {
5142                DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5143                return FAILURE;
5144        }
5145        return SUCCESS;
5146}
5147/* deletes a specified unicast/multicast mac entry from CAM */
5148static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5149{
5150        int offset;
5151        u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5152        struct config_param *config = &sp->config;
5153
5154        for (offset = 1;
5155             offset < config->max_mc_addr; offset++) {
5156                tmp64 = do_s2io_read_unicast_mc(sp, offset);
5157                if (tmp64 == addr) {
5158                        /* disable the entry by writing  0xffffffffffffULL */
5159                        if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5160                                return FAILURE;
5161                        /* store the new mac list from CAM */
5162                        do_s2io_store_unicast_mc(sp);
5163                        return SUCCESS;
5164                }
5165        }
5166        DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5167                  (unsigned long long)addr);
5168        return FAILURE;
5169}
5170
/* read mac entries from CAM */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	u64 tmp64, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read mac addr: issue a read strobe for CAM entry @offset */
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET, true)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		/* NOTE(review): FAILURE widened to u64 is all-ones (64-bit),
		 * which differs from S2IO_DISABLE_MAC_ENTRY (48-bit all-ones)
		 * that callers use as the "empty entry" sentinel — confirm
		 * callers tolerate this error value. */
		return FAILURE;
	}
	tmp64 = readq(&bar0->rmac_addr_data0_mem);

	/* The 48-bit MAC sits in the top bytes of the 64-bit data reg. */
	return tmp64 >> 16;
}
5193
/*
 * s2io_set_mac_addr - driver entry point
 * @dev: device whose MAC address is being changed
 * @p: struct sockaddr carrying the new address in sa_data
 *
 * Validates the new address, mirrors it into dev->dev_addr, and
 * programs it into the adapter's CAM via do_s2io_prog_unicast().
 * Returns -EADDRNOTAVAIL for an invalid address, otherwise the
 * result of do_s2io_prog_unicast().
 */

static int s2io_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Reject zero/multicast addresses up front. */
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* store the MAC address in CAM */
	return do_s2io_prog_unicast(dev, dev->dev_addr);
}
5210/**
5211 *  do_s2io_prog_unicast - Programs the Xframe mac address
5212 *  @dev : pointer to the device structure.
5213 *  @addr: a uchar pointer to the new mac address which is to be set.
5214 *  Description : This procedure will program the Xframe to receive
5215 *  frames with new Mac Address
5216 *  Return value: SUCCESS on success and an appropriate (-)ve integer
5217 *  as defined in errno.h file on failure.
5218 */
5219
5220static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5221{
5222        struct s2io_nic *sp = netdev_priv(dev);
5223        register u64 mac_addr = 0, perm_addr = 0;
5224        int i;
5225        u64 tmp64;
5226        struct config_param *config = &sp->config;
5227
5228        /*
5229         * Set the new MAC address as the new unicast filter and reflect this
5230         * change on the device address registered with the OS. It will be
5231         * at offset 0.
5232         */
5233        for (i = 0; i < ETH_ALEN; i++) {
5234                mac_addr <<= 8;
5235                mac_addr |= addr[i];
5236                perm_addr <<= 8;
5237                perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5238        }
5239
5240        /* check if the dev_addr is different than perm_addr */
5241        if (mac_addr == perm_addr)
5242                return SUCCESS;
5243
5244        /* check if the mac already preset in CAM */
5245        for (i = 1; i < config->max_mac_addr; i++) {
5246                tmp64 = do_s2io_read_unicast_mc(sp, i);
5247                if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5248                        break;
5249
5250                if (tmp64 == mac_addr) {
5251                        DBG_PRINT(INFO_DBG,
5252                                  "MAC addr:0x%llx already present in CAM\n",
5253                                  (unsigned long long)mac_addr);
5254                        return SUCCESS;
5255                }
5256        }
5257        if (i == config->max_mac_addr) {
5258                DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5259                return FAILURE;
5260        }
5261        /* Update the internal structure with this new mac address */
5262        do_s2io_copy_mac_addr(sp, i, mac_addr);
5263
5264        return do_s2io_add_mac(sp, mac_addr, i);
5265}
5266
5267/**
5268 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5269 * @dev : pointer to netdev
5270 * @cmd: pointer to the structure with parameters given by ethtool to set
5271 * link information.
5272 * Description:
5273 * The function sets different link parameters provided by the user onto
5274 * the NIC.
5275 * Return value:
5276 * 0 on success.
5277 */
5278
5279static int
5280s2io_ethtool_set_link_ksettings(struct net_device *dev,
5281                                const struct ethtool_link_ksettings *cmd)
5282{
5283        struct s2io_nic *sp = netdev_priv(dev);
5284        if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5285            (cmd->base.speed != SPEED_10000) ||
5286            (cmd->base.duplex != DUPLEX_FULL))
5287                return -EINVAL;
5288        else {
5289                s2io_close(sp->dev);
5290                s2io_open(sp->dev);
5291        }
5292
5293        return 0;
5294}
5295
5296/**
5297 * s2io_ethtool_get_link_ksettings - Return link specific information.
5298 * @dev: pointer to netdev
5299 * @cmd : pointer to the structure with parameters given by ethtool
5300 * to return link information.
5301 * Description:
5302 * Returns link specific information like speed, duplex etc.. to ethtool.
5303 * Return value :
5304 * return 0 on success.
5305 */
5306
5307static int
5308s2io_ethtool_get_link_ksettings(struct net_device *dev,
5309                                struct ethtool_link_ksettings *cmd)
5310{
5311        struct s2io_nic *sp = netdev_priv(dev);
5312
5313        ethtool_link_ksettings_zero_link_mode(cmd, supported);
5314        ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5315        ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5316
5317        ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5318        ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5319        ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5320
5321        cmd->base.port = PORT_FIBRE;
5322
5323        if (netif_carrier_ok(sp->dev)) {
5324                cmd->base.speed = SPEED_10000;
5325                cmd->base.duplex = DUPLEX_FULL;
5326        } else {
5327                cmd->base.speed = SPEED_UNKNOWN;
5328                cmd->base.duplex = DUPLEX_UNKNOWN;
5329        }
5330
5331        cmd->base.autoneg = AUTONEG_DISABLE;
5332        return 0;
5333}
5334
/**
 * s2io_ethtool_gdrvinfo - Returns driver specific information.
 * @dev: pointer to netdev
 * @info : pointer to the structure with parameters given by ethtool to
 * return driver information.
 * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
 * Return value:
 *  void
 */

static void s2io_ethtool_gdrvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *info)
{
	struct s2io_nic *sp = netdev_priv(dev);

	/* Each copy is bounded by the destination field size. */
	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
}
5355
5356/**
5357 *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5358 *  @dev: pointer to netdev
5359 *  @regs : pointer to the structure with parameters given by ethtool for
5360 *          dumping the registers.
5361 *  @space: The input argument into which all the registers are dumped.
5362 *  Description:
5363 *  Dumps the entire register space of xFrame NIC into the user given
5364 *  buffer area.
5365 * Return value :
5366 * void .
5367 */
5368
5369static void s2io_ethtool_gregs(struct net_device *dev,
5370                               struct ethtool_regs *regs, void *space)
5371{
5372        int i;
5373        u64 reg;
5374        u8 *reg_space = (u8 *)space;
5375        struct s2io_nic *sp = netdev_priv(dev);
5376
5377        regs->len = XENA_REG_SPACE;
5378        regs->version = sp->pdev->subsystem_device;
5379
5380        for (i = 0; i < regs->len; i += 8) {
5381                reg = readq(sp->bar0 + i);
5382                memcpy((reg_space + i), &reg, 8);
5383        }
5384}
5385
5386/*
5387 *  s2io_set_led - control NIC led
5388 */
5389static void s2io_set_led(struct s2io_nic *sp, bool on)
5390{
5391        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5392        u16 subid = sp->pdev->subsystem_device;
5393        u64 val64;
5394
5395        if ((sp->device_type == XFRAME_II_DEVICE) ||
5396            ((subid & 0xFF) >= 0x07)) {
5397                val64 = readq(&bar0->gpio_control);
5398                if (on)
5399                        val64 |= GPIO_CTRL_GPIO_0;
5400                else
5401                        val64 &= ~GPIO_CTRL_GPIO_0;
5402
5403                writeq(val64, &bar0->gpio_control);
5404        } else {
5405                val64 = readq(&bar0->adapter_control);
5406                if (on)
5407                        val64 |= ADAPTER_LED_ON;
5408                else
5409                        val64 &= ~ADAPTER_LED_ON;
5410
5411                writeq(val64, &bar0->adapter_control);
5412        }
5413
5414}
5415
5416/**
5417 * s2io_ethtool_set_led - To physically identify the nic on the system.
5418 * @dev : network device
5419 * @state: led setting
5420 *
5421 * Description: Used to physically identify the NIC on the system.
5422 * The Link LED will blink for a time specified by the user for
5423 * identification.
5424 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5425 * identification is possible only if it's link is up.
5426 */
5427
5428static int s2io_ethtool_set_led(struct net_device *dev,
5429                                enum ethtool_phys_id_state state)
5430{
5431        struct s2io_nic *sp = netdev_priv(dev);
5432        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5433        u16 subid = sp->pdev->subsystem_device;
5434
5435        if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5436                u64 val64 = readq(&bar0->adapter_control);
5437                if (!(val64 & ADAPTER_CNTL_EN)) {
5438                        pr_err("Adapter Link down, cannot blink LED\n");
5439                        return -EAGAIN;
5440                }
5441        }
5442
5443        switch (state) {
5444        case ETHTOOL_ID_ACTIVE:
5445                sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5446                return 1;       /* cycle on/off once per second */
5447
5448        case ETHTOOL_ID_ON:
5449                s2io_set_led(sp, true);
5450                break;
5451
5452        case ETHTOOL_ID_OFF:
5453                s2io_set_led(sp, false);
5454                break;
5455
5456        case ETHTOOL_ID_INACTIVE:
5457                if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5458                        writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5459        }
5460
5461        return 0;
5462}
5463
5464static void s2io_ethtool_gringparam(struct net_device *dev,
5465                                    struct ethtool_ringparam *ering)
5466{
5467        struct s2io_nic *sp = netdev_priv(dev);
5468        int i, tx_desc_count = 0, rx_desc_count = 0;
5469
5470        if (sp->rxd_mode == RXD_MODE_1) {
5471                ering->rx_max_pending = MAX_RX_DESC_1;
5472                ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5473        } else {
5474                ering->rx_max_pending = MAX_RX_DESC_2;
5475                ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5476        }
5477
5478        ering->tx_max_pending = MAX_TX_DESC;
5479
5480        for (i = 0; i < sp->config.rx_ring_num; i++)
5481                rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5482        ering->rx_pending = rx_desc_count;
5483        ering->rx_jumbo_pending = rx_desc_count;
5484
5485        for (i = 0; i < sp->config.tx_fifo_num; i++)
5486                tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5487        ering->tx_pending = tx_desc_count;
5488        DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5489}
5490
5491/**
5492 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5493 * @dev: pointer to netdev
5494 * @ep : pointer to the structure with pause parameters given by ethtool.
5495 * Description:
5496 * Returns the Pause frame generation and reception capability of the NIC.
5497 * Return value:
5498 *  void
5499 */
5500static void s2io_ethtool_getpause_data(struct net_device *dev,
5501                                       struct ethtool_pauseparam *ep)
5502{
5503        u64 val64;
5504        struct s2io_nic *sp = netdev_priv(dev);
5505        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5506
5507        val64 = readq(&bar0->rmac_pause_cfg);
5508        if (val64 & RMAC_PAUSE_GEN_ENABLE)
5509                ep->tx_pause = true;
5510        if (val64 & RMAC_PAUSE_RX_ENABLE)
5511                ep->rx_pause = true;
5512        ep->autoneg = false;
5513}
5514
5515/**
5516 * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5517 * @dev: pointer to netdev
5518 * @ep : pointer to the structure with pause parameters given by ethtool.
5519 * Description:
5520 * It can be used to set or reset Pause frame generation or reception
5521 * support of the NIC.
5522 * Return value:
5523 * int, returns 0 on Success
5524 */
5525
5526static int s2io_ethtool_setpause_data(struct net_device *dev,
5527                                      struct ethtool_pauseparam *ep)
5528{
5529        u64 val64;
5530        struct s2io_nic *sp = netdev_priv(dev);
5531        struct XENA_dev_config __iomem *bar0 = sp->bar0;
5532
5533        val64 = readq(&bar0->rmac_pause_cfg);
5534        if (ep->tx_pause)
5535                val64 |= RMAC_PAUSE_GEN_ENABLE;
5536        else
5537                val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5538        if (ep->rx_pause)
5539                val64 |= RMAC_PAUSE_RX_ENABLE;
5540        else
5541                val64 &= ~RMAC_PAUSE_RX_ENABLE;
5542        writeq(val64, &bar0->rmac_pause_cfg);
5543        return 0;
5544}
5545
/* I2C device id of the EEPROM behind the Xframe I's I2C interface */
#define S2IO_DEV_ID		5
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 *      s2io_nic structure.
 * @off : offset at which the data must be read
 * @data : Its an output parameter where the data read at the given
 *	offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 *   I2C bus.
 * Return value:
 *  -1 on failure and 0 on success.
 */
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: read through the I2C controller, then poll up
		 * to 5 x 50ms for the transaction to complete. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: read through the SPI controller instead. The
		 * request bit is raised in a second write. */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* NOTE(review): returns 1 on NACK although
				 * the header documents -1 on failure; callers
				 * in this file only test for non-zero. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff; /* only 3 bytes are valid */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5613
/**
 *  write_eeprom - actually writes the relevant part of the data value.
 *  @sp : private member of the device structure, which is a pointer to the
 *       s2io_nic structure.
 *  @off : offset at which the data must be written
 *  @data : The data that is to be written
 *  @cnt : Number of bytes of the data that are actually to be written into
 *  the Eeprom. (max of 3)
 * Description:
 *  Actually writes the relevant part of the data value into the Eeprom
 *  through the I2C bus.
 * Return value:
 *  0 on success, -1 on failure.
 */

static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: write through the I2C controller, then poll up
		 * to 5 x 50ms for completion. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				/* Success only if the device did not NACK. */
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* A byte count of 8 is encoded as 0 in the SPI control
		 * word — presumably the hardware's encoding; confirm
		 * against the Xframe II register reference. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Raise the request bit in a second write. */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* NOTE(review): 1 on NACK although the header
				 * documents -1; callers only test non-zero. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/* Read the adapter's VPD area from PCI config space to extract the
 * product name and serial number strings. */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	/* Default product string and VPD capability offset per device
	 * generation (offsets are hardcoded — presumably they match these
	 * adapters' config space layout; verify against the PCI caps). */
	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	/* Fallback if no serial-number record is found below. */
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* Fetch the 256-byte VPD area four bytes at a time: write the VPD
	 * address, clear the flag byte, then poll until the device sets
	 * the flag to 0x80 to signal data-ready. */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		/* Read-back of the address byte; value is not used. */
		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter: look for an "SN" keyword
		 * followed by a length byte and the string itself. */
		for (cnt = 0; cnt < 252; cnt++) {
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				/* Accept only lengths that fit both the
				 * destination and the remaining buffer. */
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* vpd_data[1] holds the product-name length; the string starts at
	 * offset 3 (assumes standard VPD layout — TODO confirm). */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5750
5751/**
5752 *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5753 *  @dev: pointer to netdev
5754 *  @eeprom : pointer to the user level structure provided by ethtool,
5755 *  containing all relevant information.
5756 *  @data_buf : user defined value to be written into Eeprom.
5757 *  Description: Reads the values stored in the Eeprom at given offset
5758 *  for a given length. Stores these values int the input argument data
5759 *  buffer 'data_buf' and returns these to the caller (ethtool.)
5760 *  Return value:
5761 *  int  0 on success
5762 */
5763
5764static int s2io_ethtool_geeprom(struct net_device *dev,
5765                                struct ethtool_eeprom *eeprom, u8 * data_buf)
5766{
5767        u32 i, valid;
5768        u64 data;
5769        struct s2io_nic *sp = netdev_priv(dev);
5770
5771        eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5772
5773        if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5774                eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5775
5776        for (i = 0; i < eeprom->len; i += 4) {
5777                if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5778                        DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5779                        return -EFAULT;
5780                }
5781                valid = INV(data);
5782                memcpy((data_buf + i), &valid, 4);
5783        }
5784        return 0;
5785}
5786
5787/**
5788 *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5789 *  @dev: pointer to netdev
5790 *  @eeprom : pointer to the user level structure provided by ethtool,
5791 *  containing all relevant information.
5792 *  @data_buf : user defined value to be written into Eeprom.
5793 *  Description:
5794 *  Tries to write the user provided value in the Eeprom, at the offset
5795 *  given by the user.
5796 *  Return value:
5797 *  0 on success, -EFAULT on failure.
5798 */
5799
5800static int s2io_ethtool_seeprom(struct net_device *dev,
5801                                struct ethtool_eeprom *eeprom,
5802                                u8 *data_buf)
5803{
5804        int len = eeprom->len, cnt = 0;
5805        u64 valid = 0, data;
5806        struct s2io_nic *sp = netdev_priv(dev);
5807
5808        if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5809                DBG_PRINT(ERR_DBG,
5810                          "ETHTOOL_WRITE_EEPROM Err: "
5811                          "Magic value is wrong, it is 0x%x should be 0x%x\n",
5812                          (sp->pdev->vendor | (sp->pdev->device << 16)),
5813                          eeprom->magic);
5814                return -EFAULT;
5815        }
5816
5817        while (len) {
5818                data = (u32)data_buf[cnt] & 0x000000FF;
5819                if (data)
5820                        valid = (u32)(data << 24);
5821                else
5822                        valid = data;
5823
5824                if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5825                        DBG_PRINT(ERR_DBG,
5826                                  "ETHTOOL_WRITE_EEPROM Err: "
5827                                  "Cannot write into the specified offset\n");
5828                        return -EFAULT;
5829                }
5830                cnt++;
5831                len--;
5832        }
5833
5834        return 0;
5835}
5836
/**
 * s2io_register_test - reads and writes into all clock domains.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted
 * by the driver.
 * Description:
 * Read and write into all clock domains. The NIC has 3 clock domains,
 * see that registers in all the three regions are accessible.
 * Return value:
 * 0 on success.
 */

static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Level 1: swapper feedback register must read a fixed pattern. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
	}

	/* Level 2: pause config register must read its expected value. */
	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
	}

	/* Level 3: rx queue config expected value differs per device. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
	}

	/* Level 4: XGXS elastic FIFO config expected value. */
	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
	}

	/* Write tests: xmsi_data must read back both bit patterns. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
	}

	/* 0 = all checks passed, 1 = at least one failed. */
	*data = fail;
	return fail;
}
5903
5904/**
5905 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5906 * @sp : private member of the device structure, which is a pointer to the
5907 * s2io_nic structure.
5908 * @data:variable that returns the result of each of the test conducted by
5909 * the driver.
5910 * Description:
5911 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5912 * register.
5913 * Return value:
5914 * 0 on success.
5915 */
5916
5917static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5918{
5919        int fail = 0;
5920        u64 ret_data, org_4F0, org_7F0;
5921        u8 saved_4F0 = 0, saved_7F0 = 0;
5922        struct net_device *dev = sp->dev;
5923
5924        /* Test Write Error at offset 0 */
5925        /* Note that SPI interface allows write access to all areas
5926         * of EEPROM. Hence doing all negative testing only for Xframe I.
5927         */
5928        if (sp->device_type == XFRAME_I_DEVICE)
5929                if (!write_eeprom(sp, 0, 0, 3))
5930                        fail = 1;
5931
5932        /* Save current values at offsets 0x4F0 and 0x7F0 */
5933        if (!read_eeprom(sp, 0x4F0, &org_4F0))
5934                saved_4F0 = 1;
5935        if (!read_eeprom(sp, 0x7F0, &org_7F0))
5936                saved_7F0 = 1;
5937
5938        /* Test Write at offset 4f0 */
5939        if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5940                fail = 1;
5941        if (read_eeprom(sp, 0x4F0, &ret_data))
5942                fail = 1;
5943
5944        if (ret_data != 0x012345) {
5945                DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5946                          "Data written %llx Data read %llx\n",
5947                          dev->name, (unsigned long long)0x12345,
5948                          (unsigned long long)ret_data);
5949                fail = 1;
5950        }
5951
5952        /* Reset the EEPROM data go FFFF */
5953        write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5954
5955        /* Test Write Request Error at offset 0x7c */
5956        if (sp->device_type == XFRAME_I_DEVICE)
5957                if (!write_eeprom(sp, 0x07C, 0, 3))
5958                        fail = 1;
5959
5960        /* Test Write Request at offset 0x7f0 */
5961        if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5962                fail = 1;
5963        if (read_eeprom(sp, 0x7F0, &ret_data))
5964                fail = 1;
5965
5966        if (ret_data != 0x012345) {
5967                DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5968                          "Data written %llx Data read %llx\n",
5969                          dev->name, (unsigned long long)0x12345,
5970                          (unsigned long long)ret_data);
5971                fail = 1;
5972        }
5973
5974        /* Reset the EEPROM data go FFFF */
5975        write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5976
5977        if (sp->device_type == XFRAME_I_DEVICE) {
5978                /* Test Write Error at offset 0x80 */
5979                if (!write_eeprom(sp, 0x080, 0, 3))
5980                        fail = 1;
5981
5982                /* Test Write Error at offset 0xfc */
5983                if (!write_eeprom(sp, 0x0FC, 0, 3))
5984                        fail = 1;
5985
5986                /* Test Write Error at offset 0x100 */
5987                if (!write_eeprom(sp, 0x100, 0, 3))
5988                        fail = 1;
5989
5990                /* Test Write Error at offset 4ec */
5991                if (!write_eeprom(sp, 0x4EC, 0, 3))
5992                        fail = 1;
5993        }
5994
5995        /* Restore values at offsets 0x4F0 and 0x7F0 */
5996        if (saved_4F0)
5997                write_eeprom(sp, 0x4F0, org_4F0, 3);
5998        if (saved_7F0)
5999                write_eeprom(sp, 0x7F0, org_7F0, 3);
6000
6001        *data = fail;
6002        return fail;
6003}
6004
6005/**
6006 * s2io_bist_test - invokes the MemBist test of the card .
6007 * @sp : private member of the device structure, which is a pointer to the
6008 * s2io_nic structure.
6009 * @data:variable that returns the result of each of the test conducted by
6010 * the driver.
6011 * Description:
6012 * This invokes the MemBist test of the card. We give around
6013 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
6015 * Return value:
6016 * 0 on success and -1 on failure.
6017 */
6018
6019static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6020{
6021        u8 bist = 0;
6022        int cnt = 0, ret = -1;
6023
6024        pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6025        bist |= PCI_BIST_START;
6026        pci_write_config_word(sp->pdev, PCI_BIST, bist);
6027
6028        while (cnt < 20) {
6029                pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6030                if (!(bist & PCI_BIST_START)) {
6031                        *data = (bist & PCI_BIST_CODE_MASK);
6032                        ret = 0;
6033                        break;
6034                }
6035                msleep(100);
6036                cnt++;
6037        }
6038
6039        return ret;
6040}
6041
6042/**
6043 * s2io_link_test - verifies the link state of the nic
6044 * @sp: private member of the device structure, which is a pointer to the
6045 * s2io_nic structure.
6046 * @data: variable that returns the result of each of the test conducted by
6047 * the driver.
6048 * Description:
6049 * The function verifies the link state of the NIC and updates the input
6050 * argument 'data' appropriately.
6051 * Return value:
6052 * 0 on success.
6053 */
6054
6055static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6056{
6057        struct XENA_dev_config __iomem *bar0 = sp->bar0;
6058        u64 val64;
6059
6060        val64 = readq(&bar0->adapter_status);
6061        if (!(LINK_IS_UP(val64)))
6062                *data = 1;
6063        else
6064                *data = 0;
6065
6066        return *data;
6067}
6068
6069/**
6070 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6071 * @sp: private member of the device structure, which is a pointer to the
6072 * s2io_nic structure.
6073 * @data: variable that returns the result of each of the test
6074 * conducted by the driver.
6075 * Description:
6076 *  This is one of the offline test that tests the read and write
6077 *  access to the RldRam chip on the NIC.
6078 * Return value:
6079 *  0 on success.
6080 */
6081
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC reporting while we deliberately exercise the RAM */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into test mode */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/*
	 * Two passes: the base patterns on iteration 0, then the same
	 * patterns with the upper 48 bits inverted on iteration 1.
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Target address inside the RLDRAM for the test access */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Start the write phase of the test */
		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		/* Wait up to ~1s (5 x 200ms) for the write to finish */
		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		if (cnt == 5)
			break;	/* write phase timed out; *data stays 0 */

		/* Start the read-back/compare phase */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		/* Wait up to ~2.5s (5 x 500ms) for the read to finish */
		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;	/* read phase timed out */

		/* Hardware sets PASS when the read-back matched */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6164
6165/**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6167 *  @dev: pointer to netdev
6168 *  @ethtest : pointer to a ethtool command specific structure that will be
6169 *  returned to the user.
6170 *  @data : variable that returns the result of each of the test
6171 * conducted by the driver.
6172 * Description:
6173 *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6174 *  the health of the card.
6175 * Return value:
6176 *  void
6177 */
6178
6179static void s2io_ethtool_test(struct net_device *dev,
6180                              struct ethtool_test *ethtest,
6181                              uint64_t *data)
6182{
6183        struct s2io_nic *sp = netdev_priv(dev);
6184        int orig_state = netif_running(sp->dev);
6185
6186        if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6187                /* Offline Tests. */
6188                if (orig_state)
6189                        s2io_close(sp->dev);
6190
6191                if (s2io_register_test(sp, &data[0]))
6192                        ethtest->flags |= ETH_TEST_FL_FAILED;
6193
6194                s2io_reset(sp);
6195
6196                if (s2io_rldram_test(sp, &data[3]))
6197                        ethtest->flags |= ETH_TEST_FL_FAILED;
6198
6199                s2io_reset(sp);
6200
6201                if (s2io_eeprom_test(sp, &data[1]))
6202                        ethtest->flags |= ETH_TEST_FL_FAILED;
6203
6204                if (s2io_bist_test(sp, &data[4]))
6205                        ethtest->flags |= ETH_TEST_FL_FAILED;
6206
6207                if (orig_state)
6208                        s2io_open(sp->dev);
6209
6210                data[2] = 0;
6211        } else {
6212                /* Online Tests. */
6213                if (!orig_state) {
6214                        DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6215                                  dev->name);
6216                        data[0] = -1;
6217                        data[1] = -1;
6218                        data[2] = -1;
6219                        data[3] = -1;
6220                        data[4] = -1;
6221                }
6222
6223                if (s2io_link_test(sp, &data[2]))
6224                        ethtest->flags |= ETH_TEST_FL_FAILED;
6225
6226                data[0] = 0;
6227                data[1] = 0;
6228                data[3] = 0;
6229                data[4] = 0;
6230        }
6231}
6232
/*
 * Fill the ethtool statistics array.  Values are emitted in exactly the
 * order of the key string tables (ethtool_xena_stats_keys, then
 * ethtool_enhanced_stats_keys on Xframe II, then
 * ethtool_driver_stats_keys), so entries must not be reordered.  Many
 * 32-bit hardware counters have a companion "_oflow" register holding
 * the upper 32 bits; each such pair is combined into a single u64.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 *tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Latch fresh hardware counters into the stats block first */
	s2io_updt_stats(sp);
	/* TMAC (transmit MAC) hardware counters */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
		le32_to_cpu(stats->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stats->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
		le32_to_cpu(stats->tmac_udp);
	/* RMAC (receive MAC) hardware counters */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stats->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_jabber_frms);
	/* Received-frame size histogram */
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
	/* Per-receive-queue frame and "queue full" counters */
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stats->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
	/* PCI/PCI-X bus transaction counters */
	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if (sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
	}

	/* Driver-maintained software statistics */
	/* NOTE(review): constant 0 entry — presumably a placeholder kept
	 * to stay aligned with the key string table; confirm intent. */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->single_ecc_errs;
	tmp_stats[i++] = swstats->double_ecc_errs;
	tmp_stats[i++] = swstats->parity_err_cnt;
	tmp_stats[i++] = swstats->serious_err_cnt;
	tmp_stats[i++] = swstats->soft_reset_cnt;
	tmp_stats[i++] = swstats->fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = swstats->ring_full_cnt[k];
	/* XPAK transceiver alarm/warning counters */
	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
	tmp_stats[i++] = xstats->warn_laser_output_power_high;
	tmp_stats[i++] = xstats->warn_laser_output_power_low;
	tmp_stats[i++] = swstats->clubbed_frms_cnt;
	tmp_stats[i++] = swstats->sending_both;
	tmp_stats[i++] = swstats->outof_sequence_pkts;
	tmp_stats[i++] = swstats->flush_max_pkts;
	/* Average packets per aggregation (integer division emulated) */
	if (swstats->num_aggregations) {
		u64 tmp = swstats->sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= swstats->num_aggregations) {
			tmp -= swstats->num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	} else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
	tmp_stats[i++] = swstats->pci_map_fail_cnt;
	tmp_stats[i++] = swstats->watchdog_timer_cnt;
	tmp_stats[i++] = swstats->mem_allocated;
	tmp_stats[i++] = swstats->mem_freed;
	tmp_stats[i++] = swstats->link_up_cnt;
	tmp_stats[i++] = swstats->link_down_cnt;
	tmp_stats[i++] = swstats->link_up_time;
	tmp_stats[i++] = swstats->link_down_time;

	/* Transmit-side error counters */
	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
	tmp_stats[i++] = swstats->tx_parity_err_cnt;
	tmp_stats[i++] = swstats->tx_link_loss_cnt;
	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

	/* Receive-side and adapter-block error counters */
	tmp_stats[i++] = swstats->rx_parity_err_cnt;
	tmp_stats[i++] = swstats->rx_abort_cnt;
	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
	tmp_stats[i++] = swstats->tda_err_cnt;
	tmp_stats[i++] = swstats->pfc_err_cnt;
	tmp_stats[i++] = swstats->pcc_err_cnt;
	tmp_stats[i++] = swstats->tti_err_cnt;
	tmp_stats[i++] = swstats->tpa_err_cnt;
	tmp_stats[i++] = swstats->sm_err_cnt;
	tmp_stats[i++] = swstats->lso_err_cnt;
	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = swstats->rc_err_cnt;
	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
	tmp_stats[i++] = swstats->rpa_err_cnt;
	tmp_stats[i++] = swstats->rda_err_cnt;
	tmp_stats[i++] = swstats->rti_err_cnt;
	tmp_stats[i++] = swstats->mc_err_cnt;
}
6509
/* Size in bytes of the register dump returned for `ethtool -d`. */
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
	return XENA_REG_SPACE;
}
6514
6515
/* Size in bytes of the on-board EEPROM exposed through ethtool. */
static int s2io_get_eeprom_len(struct net_device *dev)
{
	return XENA_EEPROM_SPACE;
}
6520
6521static int s2io_get_sset_count(struct net_device *dev, int sset)
6522{
6523        struct s2io_nic *sp = netdev_priv(dev);
6524
6525        switch (sset) {
6526        case ETH_SS_TEST:
6527                return S2IO_TEST_LEN;
6528        case ETH_SS_STATS:
6529                switch (sp->device_type) {
6530                case XFRAME_I_DEVICE:
6531                        return XFRAME_I_STAT_LEN;
6532                case XFRAME_II_DEVICE:
6533                        return XFRAME_II_STAT_LEN;
6534                default:
6535                        return 0;
6536                }
6537        default:
6538                return -EOPNOTSUPP;
6539        }
6540}
6541
6542static void s2io_ethtool_get_strings(struct net_device *dev,
6543                                     u32 stringset, u8 *data)
6544{
6545        int stat_size = 0;
6546        struct s2io_nic *sp = netdev_priv(dev);
6547
6548        switch (stringset) {
6549        case ETH_SS_TEST:
6550                memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6551                break;
6552        case ETH_SS_STATS:
6553                stat_size = sizeof(ethtool_xena_stats_keys);
6554                memcpy(data, &ethtool_xena_stats_keys, stat_size);
6555                if (sp->device_type == XFRAME_II_DEVICE) {
6556                        memcpy(data + stat_size,
6557                               &ethtool_enhanced_stats_keys,
6558                               sizeof(ethtool_enhanced_stats_keys));
6559                        stat_size += sizeof(ethtool_enhanced_stats_keys);
6560                }
6561
6562                memcpy(data + stat_size, &ethtool_driver_stats_keys,
6563                       sizeof(ethtool_driver_stats_keys));
6564        }
6565}
6566
6567static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6568{
6569        struct s2io_nic *sp = netdev_priv(dev);
6570        netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6571
6572        if (changed && netif_running(dev)) {
6573                int rc;
6574
6575                s2io_stop_all_tx_queue(sp);
6576                s2io_card_down(sp);
6577                dev->features = features;
6578                rc = s2io_card_up(sp);
6579                if (rc)
6580                        s2io_reset(sp);
6581                else
6582                        s2io_start_all_tx_queue(sp);
6583
6584                return rc ? rc : 1;
6585        }
6586
6587        return 0;
6588}
6589
/* ethtool operations exported via dev->ethtool_ops */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.set_phys_id = s2io_ethtool_set_led,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
};
6609
6610/**
6611 *  s2io_ioctl - Entry point for the Ioctl
6612 *  @dev :  Device pointer.
 *  @rq :  An IOCTL specific structure, that can contain a pointer to
6614 *  a proprietary structure used to pass information to the driver.
6615 *  @cmd :  This is used to distinguish between the different commands that
6616 *  can be passed to the IOCTL functions.
6617 *  Description:
 *  Currently no special functionality is supported in IOCTL, hence the
 *  function always returns -EOPNOTSUPP.
6620 */
6621
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* No private ioctls are supported by this driver. */
	return -EOPNOTSUPP;
}
6626
6627/**
6628 *  s2io_change_mtu - entry point to change MTU size for the device.
6629 *   @dev : device pointer.
6630 *   @new_mtu : the new MTU size for the device.
6631 *   Description: A driver entry point to change MTU size for the device.
6632 *   Before changing the MTU the device must be stopped.
6633 *  Return value:
6634 *   0 on success and an appropriate (-)ve integer as defined in errno.h
6635 *   file on failure.
6636 */
6637
6638static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6639{
6640        struct s2io_nic *sp = netdev_priv(dev);
6641        int ret = 0;
6642
6643        dev->mtu = new_mtu;
6644        if (netif_running(dev)) {
6645                s2io_stop_all_tx_queue(sp);
6646                s2io_card_down(sp);
6647                ret = s2io_card_up(sp);
6648                if (ret) {
6649                        DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6650                                  __func__);
6651                        return ret;
6652                }
6653                s2io_wake_all_tx_queue(sp);
6654        } else { /* Device is down */
6655                struct XENA_dev_config __iomem *bar0 = sp->bar0;
6656                u64 val64 = new_mtu;
6657
6658                writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6659        }
6660
6661        return ret;
6662}
6663
6664/**
 * s2io_set_link - Set the link status
6666 * @work: work struct containing a pointer to device private structure
6667 * Description: Sets the link status for the adapter
6668 */
6669
6670static void s2io_set_link(struct work_struct *work)
6671{
6672        struct s2io_nic *nic = container_of(work, struct s2io_nic,
6673                                            set_link_task);
6674        struct net_device *dev = nic->dev;
6675        struct XENA_dev_config __iomem *bar0 = nic->bar0;
6676        register u64 val64;
6677        u16 subid;
6678
6679        rtnl_lock();
6680
6681        if (!netif_running(dev))
6682                goto out_unlock;
6683
6684        if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6685                /* The card is being reset, no point doing anything */
6686                goto out_unlock;
6687        }
6688
6689        subid = nic->pdev->subsystem_device;
6690        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6691                /*
6692                 * Allow a small delay for the NICs self initiated
6693                 * cleanup to complete.
6694                 */
6695                msleep(100);
6696        }
6697
6698        val64 = readq(&bar0->adapter_status);
6699        if (LINK_IS_UP(val64)) {
6700                if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6701                        if (verify_xena_quiescence(nic)) {
6702                                val64 = readq(&bar0->adapter_control);
6703                                val64 |= ADAPTER_CNTL_EN;
6704                                writeq(val64, &bar0->adapter_control);
6705                                if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6706                                            nic->device_type, subid)) {
6707                                        val64 = readq(&bar0->gpio_control);
6708                                        val64 |= GPIO_CTRL_GPIO_0;
6709                                        writeq(val64, &bar0->gpio_control);
6710                                        val64 = readq(&bar0->gpio_control);
6711                                } else {
6712                                        val64 |= ADAPTER_LED_ON;
6713                                        writeq(val64, &bar0->adapter_control);
6714                                }
6715                                nic->device_enabled_once = true;
6716                        } else {
6717                                DBG_PRINT(ERR_DBG,
6718                                          "%s: Error: device is not Quiescent\n",
6719                                          dev->name);
6720                                s2io_stop_all_tx_queue(nic);
6721                        }
6722                }
6723                val64 = readq(&bar0->adapter_control);
6724                val64 |= ADAPTER_LED_ON;
6725                writeq(val64, &bar0->adapter_control);
6726                s2io_link(nic, LINK_UP);
6727        } else {
6728                if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6729                                                      subid)) {
6730                        val64 = readq(&bar0->gpio_control);
6731                        val64 &= ~GPIO_CTRL_GPIO_0;
6732                        writeq(val64, &bar0->gpio_control);
6733                        val64 = readq(&bar0->gpio_control);
6734                }
6735                /* turn off LED */
6736                val64 = readq(&bar0->adapter_control);
6737                val64 = val64 & (~ADAPTER_LED_ON);
6738                writeq(val64, &bar0->adapter_control);
6739                s2io_link(nic, LINK_DOWN);
6740        }
6741        clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6742
6743out_unlock:
6744        rtnl_unlock();
6745}
6746
/*
 * set_rxd_buffer_pointer - re-arm one Rx descriptor with DMA-mapped buffers.
 * @sp: device private structure
 * @rxdp: descriptor to (re)populate; only touched if Host_Control is 0
 * @ba: buffer-address bookkeeping (ba_0/ba_1) used in 2-buffer mode
 * @skb: in/out skb pointer; when non-NULL on entry the cached mappings in
 *       *temp0..*temp2 are reused instead of allocating a new skb
 * @temp0: cached Buffer0 DMA address shared across descriptors
 * @temp1: cached Buffer1 (dummy) DMA address, 2-buffer mode only
 * @temp2: cached Buffer2 (payload) DMA address, 2-buffer mode only
 * @size: skb allocation size for this ring mode
 *
 * Used while quiescing the card (see rxd_owner_bit_reset()): the frames
 * will never be processed, so one skb/mapping can service many
 * descriptors. Returns 0 on success, -ENOMEM on allocation or DMA
 * mapping failure (the skb is freed in that case).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				  struct buffAdd *ba,
				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
				  u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name, "1 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       size - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the mappings cached from a previous call */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name,
					  "2 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* Buffer2 carries the payload (MTU + 4) */
			rxdp3->Buffer2_ptr = *temp2 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       dev->mtu + 4, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			/* Buffer0 carries the header area (ba_0) */
			rxdp3->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, ba->ba_0,
					       BUF0_LEN, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
				/* Unwind the Buffer2 mapping before bailing */
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				dma_map_single(&sp->pdev->dev, ba->ba_1,
					       BUF1_LEN, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
				/* Unwind both earlier mappings */
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer0_ptr,
						 BUF0_LEN, DMA_FROM_DEVICE);
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;

memalloc_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
6847
6848static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6849                                int size)
6850{
6851        struct net_device *dev = sp->dev;
6852        if (sp->rxd_mode == RXD_MODE_1) {
6853                rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6854        } else if (sp->rxd_mode == RXD_MODE_3B) {
6855                rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6856                rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6857                rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6858        }
6859}
6860
6861static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6862{
6863        int i, j, k, blk_cnt = 0, size;
6864        struct config_param *config = &sp->config;
6865        struct mac_info *mac_control = &sp->mac_control;
6866        struct net_device *dev = sp->dev;
6867        struct RxD_t *rxdp = NULL;
6868        struct sk_buff *skb = NULL;
6869        struct buffAdd *ba = NULL;
6870        u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6871
6872        /* Calculate the size based on ring mode */
6873        size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6874                HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6875        if (sp->rxd_mode == RXD_MODE_1)
6876                size += NET_IP_ALIGN;
6877        else if (sp->rxd_mode == RXD_MODE_3B)
6878                size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6879
6880        for (i = 0; i < config->rx_ring_num; i++) {
6881                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6882                struct ring_info *ring = &mac_control->rings[i];
6883
6884                blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6885
6886                for (j = 0; j < blk_cnt; j++) {
6887                        for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6888                                rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6889                                if (sp->rxd_mode == RXD_MODE_3B)
6890                                        ba = &ring->ba[j][k];
6891                                if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6892                                                           &temp0_64,
6893                                                           &temp1_64,
6894                                                           &temp2_64,
6895                                                           size) == -ENOMEM) {
6896                                        return 0;
6897                                }
6898
6899                                set_rxd_buffer_size(sp, rxdp, size);
6900                                dma_wmb();
6901                                /* flip the Ownership bit to Hardware */
6902                                rxdp->Control_1 |= RXD_OWN_XENA;
6903                        }
6904                }
6905        }
6906        return 0;
6907
6908}
6909
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 * @sp: device private structure
 *
 * When configured for MSI-X, enables MSI-X and registers one handler
 * per ring/alarm vector; any failure along the way falls back to
 * legacy INTA.  Returns 0 on success, -1 if even the INTA registration
 * fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X enable failed - fall back to legacy interrupts */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
				    MSIX_RING_TYPE) {
					/* Per-ring Rx vector */
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_ring_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					   MSIX_ALARM_TYPE) {
					/* Alarm vector (labelled TX) */
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-TX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_fifo_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
				      sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						  "%s @Addr:0x%llx Data:0x%llx\n",
						  sp->desc[i],
						  (unsigned long long)
						  sp->msix_info[i].addr,
						  (unsigned long long)
						  ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Undo everything registered so far
					 * and drop back to INTA */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						  "%s:MSI-X-%d registration "
						  "failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						  "%s: Defaulting to INTA\n",
						  dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			/* NOTE(review): --msix_rx_cnt presumably excludes the
			 * alarm vector counted above - confirm */
			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
			DBG_PRINT(INFO_DBG,
				  "MSI-X-TX entries enabled through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
7005
7006static void s2io_rem_isr(struct s2io_nic *sp)
7007{
7008        if (sp->config.intr_type == MSI_X)
7009                remove_msix_isr(sp);
7010        else
7011                remove_inta_isr(sp);
7012}
7013
/*
 * do_s2io_card_down - common teardown path for the adapter.
 * @sp: device private structure
 * @do_io: non-zero when device register I/O (stop_nic, quiescence poll,
 *         reset) should be performed as part of the teardown
 */
static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	/* Already down - nothing to do */
	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
		msleep(50);
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type ==  MSI_X) {
			/* One NAPI context per Rx ring under MSI-X */
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we
		 * are just setting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		msleep(50);
		cnt++;
		if (cnt == 10) {
			/* Give up after ~500 ms; the reset below still runs */
			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
				  "adapter status reads 0x%llx\n",
				  (unsigned long long)val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7088
/* Bring the adapter down, including device register I/O (stop/reset). */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
7093
/*
 * s2io_card_up - bring the adapter to an operational state.
 * @sp: device private structure
 *
 * Initialises the hardware registers, fills every Rx ring with buffers,
 * enables NAPI, restores the receive mode, starts the NIC, registers the
 * interrupt handler(s), arms the alarm timer and finally enables the
 * selected interrupts.  Returns 0 on success or a negative errno; on
 * failure the card is reset and any Rx buffers already posted are freed.
 *
 * NOTE(review): NAPI is left enabled if start_nic()/s2io_add_isr() fail
 * below - confirm callers tolerate this.
 */
static int s2io_card_up(struct s2io_nic *sp)
{
	int i, ret = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct net_device *dev = sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		ring->mtu = dev->mtu;
		ring->lro = !!(dev->features & NETIF_F_LRO);
		ret = fill_rx_buffers(sp, ring, 1);
		if (ret) {
			/* free_rx_buffers() also releases rings filled in
			 * earlier iterations */
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  ring->rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		if (config->intr_type ==  MSI_X) {
			/* One NAPI context per Rx ring under MSI-X */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev, true);

	if (dev->features & NETIF_F_LRO) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use (if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		/* Rx is handled per-vector under MSI-X */
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	return 0;
}
7200
/**
 * s2io_restart_nic - Resets the NIC.
 * @work : work struct containing a pointer to the device private structure
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watch dog routine which is run holding a
 * spin lock.
 */

static void s2io_restart_nic(struct work_struct *work)
{
	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
	struct net_device *dev = sp->dev;

	rtnl_lock();

	/* Interface may have been closed since the work was scheduled */
	if (!netif_running(dev))
		goto out_unlock;

	/* Full down/up cycle to recover the hardware */
	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
	}
	s2io_wake_all_tx_queue(sp);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
out_unlock:
	rtnl_unlock();
}
7230
7231/**
7232 *  s2io_tx_watchdog - Watchdog for transmit side.
7233 *  @dev : Pointer to net device structure
7234 *  @txqueue: index of the hanging queue
7235 *  Description:
7236 *  This function is triggered if the Tx Queue is stopped
7237 *  for a pre-defined amount of time when the Interface is still up.
7238 *  If the Interface is jammed in such a situation, the hardware is
7239 *  reset (by s2io_close) and restarted again (by s2io_open) to
7240 *  overcome any problem that might have been caused in the hardware.
7241 *  Return value:
7242 *  void
7243 */
7244
7245static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
7246{
7247        struct s2io_nic *sp = netdev_priv(dev);
7248        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7249
7250        if (netif_carrier_ok(dev)) {
7251                swstats->watchdog_timer_cnt++;
7252                schedule_work(&sp->rst_timer_task);
7253                swstats->soft_reset_cnt++;
7254        }
7255}
7256
/**
 *   rx_osm_handler - To perform some OS related operations on SKB.
 *   @ring_data : the ring from which this RxD was extracted.
 *   @rxdp: descriptor
 *   Description:
 *   This function is called by the Rx interrupt service routine to perform
 *   some OS related operations on the SKB before passing it to the upper
 *   layers. It mainly checks if the checksum is OK, if so adds it to the
 *   SKBs cksum variable, increments the Rx packet count and passes the SKB
 *   to the upper layer. If the checksum is wrong, it increments the Rx
 *   packet error count, frees the SKB and returns error.
 *   Return value:
 *   SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = ring_data->dev;
	/* The skb was stashed in Host_Control when the buffer was posted */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long)rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1)
			swstats->parity_err_cnt++;

		/* Transfer code sits in the top bits of Control_1 */
		err_mask = err >> 48;
		switch (err_mask) {
		case 1:
			swstats->rx_parity_err_cnt++;
			break;

		case 2:
			swstats->rx_abort_cnt++;
			break;

		case 3:
			swstats->rx_parity_abort_cnt++;
			break;

		case 4:
			swstats->rx_rda_fail_cnt++;
			break;

		case 5:
			swstats->rx_unkn_prot_cnt++;
			break;

		case 6:
			swstats->rx_fcs_err_cnt++;
			break;

		case 7:
			swstats->rx_buf_size_err_cnt++;
			break;

		case 8:
			swstats->rx_rxd_corrupt_cnt++;
			break;

		case 15:
			swstats->rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				  dev->name, err_mask);
			dev->stats.rx_crc_errors++;
			swstats->mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* Single buffer mode: whole frame is in buffer 0 */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		skb_put(skb, len);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* Two buffer mode: copy the header (buffer 0) in front of
		 * the payload (buffer 2) */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Checksum offload / LRO only for unfragmented TCP/UDP frames */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
	    ((!ring_data->lro) ||
	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
	    (dev->features & NETIF_F_RXCSUM)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len = 0;
				u8 *tcp;
				int ret = 0;

				/* Try to aggregate this segment into an
				 * existing LRO session */
				ret = s2io_club_tcp_session(ring_data,
							    skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro, skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro, skb, tcp_len);
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					swstats->flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len = lro->frags_len;
					swstats->sending_both++;
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not L2 aggregatable */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						  "%s: Samadhana!!\n",
						  __func__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb_checksum_none_assert(skb);
		}
	} else
		skb_checksum_none_assert(skb);

	/* NOTE(review): mem_freed is charged before the skb is handed to the
	 * stack - presumably "no longer owned by the driver"; confirm */
	swstats->mem_freed += skb->truesize;
send_up:
	skb_record_rx_queue(skb, ring_no);
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7441
7442/**
7443 *  s2io_link - stops/starts the Tx queue.
7444 *  @sp : private member of the device structure, which is a pointer to the
7445 *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
7447 *  Description:
7448 *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
7450 *  interrupt handler whenever a link change interrupt comes up.
7451 *  Return value:
7452 *  void.
7453 */
7454
7455static void s2io_link(struct s2io_nic *sp, int link)
7456{
7457        struct net_device *dev = sp->dev;
7458        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7459
7460        if (link != sp->last_link_state) {
7461                init_tti(sp, link, false);
7462                if (link == LINK_DOWN) {
7463                        DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7464                        s2io_stop_all_tx_queue(sp);
7465                        netif_carrier_off(dev);
7466                        if (swstats->link_up_cnt)
7467                                swstats->link_up_time =
7468                                        jiffies - sp->start_time;
7469                        swstats->link_down_cnt++;
7470                } else {
7471                        DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7472                        if (swstats->link_down_cnt)
7473                                swstats->link_down_time =
7474                                        jiffies - sp->start_time;
7475                        swstats->link_up_cnt++;
7476                        netif_carrier_on(dev);
7477                        s2io_wake_all_tx_queue(sp);
7478                }
7479        }
7480        sp->last_link_state = link;
7481        sp->start_time = jiffies;
7482}
7483
7484/**
7485 *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7486 *  @sp : private member of the device structure, which is a pointer to the
7487 *  s2io_nic structure.
7488 *  Description:
7489 *  This function initializes a few of the PCI and PCI-X configuration registers
7490 *  with recommended values.
7491 *  Return value:
7492 *  void
7493 */
7494
7495static void s2io_init_pci(struct s2io_nic *sp)
7496{
7497        u16 pci_cmd = 0, pcix_cmd = 0;
7498
7499        /* Enable Data Parity Error Recovery in PCI-X command register. */
7500        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7501                             &(pcix_cmd));
7502        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7503                              (pcix_cmd | 1));
7504        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7505                             &(pcix_cmd));
7506
7507        /* Set the PErr Response bit in PCI command register. */
7508        pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7509        pci_write_config_word(sp->pdev, PCI_COMMAND,
7510                              (pci_cmd | PCI_COMMAND_PARITY));
7511        pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7512}
7513
7514static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7515                            u8 *dev_multiq)
7516{
7517        int i;
7518
7519        if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7520                DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7521                          "(%d) not supported\n", tx_fifo_num);
7522
7523                if (tx_fifo_num < 1)
7524                        tx_fifo_num = 1;
7525                else
7526                        tx_fifo_num = MAX_TX_FIFOS;
7527
7528                DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7529        }
7530
7531        if (multiq)
7532                *dev_multiq = multiq;
7533
7534        if (tx_steering_type && (1 == tx_fifo_num)) {
7535                if (tx_steering_type != TX_DEFAULT_STEERING)
7536                        DBG_PRINT(ERR_DBG,
7537                                  "Tx steering is not supported with "
7538                                  "one fifo. Disabling Tx steering.\n");
7539                tx_steering_type = NO_STEERING;
7540        }
7541
7542        if ((tx_steering_type < NO_STEERING) ||
7543            (tx_steering_type > TX_DEFAULT_STEERING)) {
7544                DBG_PRINT(ERR_DBG,
7545                          "Requested transmit steering not supported\n");
7546                DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7547                tx_steering_type = NO_STEERING;
7548        }
7549
7550        if (rx_ring_num > MAX_RX_RINGS) {
7551                DBG_PRINT(ERR_DBG,
7552                          "Requested number of rx rings not supported\n");
7553                DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7554                          MAX_RX_RINGS);
7555                rx_ring_num = MAX_RX_RINGS;
7556        }
7557
7558        if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7559                DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7560                          "Defaulting to INTA\n");
7561                *dev_intr_type = INTA;
7562        }
7563
7564        if ((*dev_intr_type == MSI_X) &&
7565            ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7566             (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7567                DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7568                          "Defaulting to INTA\n");
7569                *dev_intr_type = INTA;
7570        }
7571
7572        if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7573                DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7574                DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7575                rx_ring_mode = 1;
7576        }
7577
7578        for (i = 0; i < MAX_RX_RINGS; i++)
7579                if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7580                        DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7581                                  "supported\nDefaulting to %d\n",
7582                                  MAX_RX_BLOCKS_PER_RING);
7583                        rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7584                }
7585
7586        return SUCCESS;
7587}
7588
7589/**
7590 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively.
7591 * @nic: device private variable
7592 * @ds_codepoint: data
7593 * @ring: ring index
7594 * Description: The function configures the receive steering to
7595 * desired receive ring.
7596 * Return Value:  SUCCESS on success and
7597 * '-1' on failure (endian settings incorrect).
7598 */
7599static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7600{
7601        struct XENA_dev_config __iomem *bar0 = nic->bar0;
7602        register u64 val64 = 0;
7603
7604        if (ds_codepoint > 63)
7605                return FAILURE;
7606
7607        val64 = RTS_DS_MEM_DATA(ring);
7608        writeq(val64, &bar0->rts_ds_mem_data);
7609
7610        val64 = RTS_DS_MEM_CTRL_WE |
7611                RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7612                RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7613
7614        writeq(val64, &bar0->rts_ds_mem_ctrl);
7615
7616        return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7617                                     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7618                                     S2IO_BIT_RESET, true);
7619}
7620
/* Driver entry points hooked into the net core (installed on the netdev
 * in s2io_init_nic() via dev->netdev_ops).
 */
static const struct net_device_ops s2io_netdev_ops = {
        .ndo_open               = s2io_open,
        .ndo_stop               = s2io_close,
        .ndo_get_stats          = s2io_get_stats,
        .ndo_start_xmit         = s2io_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = s2io_ndo_set_multicast,
        .ndo_eth_ioctl          = s2io_ioctl,
        .ndo_set_mac_address    = s2io_set_mac_addr,
        .ndo_change_mtu         = s2io_change_mtu,
        .ndo_set_features       = s2io_set_features,
        .ndo_tx_timeout         = s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = s2io_netpoll,
#endif
};
7637
7638/**
7639 *  s2io_init_nic - Initialization of the adapter .
7640 *  @pdev : structure containing the PCI related information of the device.
7641 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7642 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
7646 *  control register is initialized to enable read and write into the I/O
7647 *  registers of the device.
7648 *  Return value:
7649 *  returns 0 on success and negative on failure.
7650 */
7651
static int
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
        struct s2io_nic *sp;
        struct net_device *dev;
        int i, j, ret;
        int dma_flag = false;   /* set true when the 64-bit DMA mask is accepted */
        u32 mac_up, mac_down;
        u64 val64 = 0, tmp64 = 0;
        struct XENA_dev_config __iomem *bar0 = NULL;
        u16 subid;
        struct config_param *config;
        struct mac_info *mac_control;
        int mode;
        u8 dev_intr_type = intr_type;
        u8 dev_multiq = 0;

        /* Validate/clamp module parameters before touching hardware. */
        ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
        if (ret)
                return ret;

        ret = pci_enable_device(pdev);
        if (ret) {
                DBG_PRINT(ERR_DBG,
                          "%s: pci_enable_device failed\n", __func__);
                return ret;
        }

        /* Prefer a 64-bit DMA mask, fall back to 32-bit; fail otherwise. */
        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
                dma_flag = true;
                if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                        DBG_PRINT(ERR_DBG,
                                  "Unable to obtain 64bit DMA for coherent allocations\n");
                        pci_disable_device(pdev);
                        return -ENOMEM;
                }
        } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
        } else {
                pci_disable_device(pdev);
                return -ENOMEM;
        }
        ret = pci_request_regions(pdev, s2io_driver_name);
        if (ret) {
                DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
                          __func__, ret);
                pci_disable_device(pdev);
                return -ENODEV;
        }
        /* Multiqueue netdev only when the multiq module parameter asked for it. */
        if (dev_multiq)
                dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
        else
                dev = alloc_etherdev(sizeof(struct s2io_nic));
        if (dev == NULL) {
                pci_disable_device(pdev);
                pci_release_regions(pdev);
                return -ENODEV;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        /*  Private member variable initialized to s2io NIC structure */
        sp = netdev_priv(dev);
        sp->dev = dev;
        sp->pdev = pdev;
        sp->high_dma_flag = dma_flag;
        sp->device_enabled_once = false;
        if (rx_ring_mode == 1)
                sp->rxd_mode = RXD_MODE_1;
        if (rx_ring_mode == 2)
                sp->rxd_mode = RXD_MODE_3B;

        sp->config.intr_type = dev_intr_type;

        /* Herc (Xframe II) vs. Xframe I is distinguished by PCI device ID. */
        if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
            (pdev->device == PCI_DEVICE_ID_HERC_UNI))
                sp->device_type = XFRAME_II_DEVICE;
        else
                sp->device_type = XFRAME_I_DEVICE;


        /* Initialize some PCI/PCI-X fields of the NIC. */
        s2io_init_pci(sp);

        /*
         * Setting the device configuration parameters.
         * Most of these parameters can be specified by the user during
         * module insertion as they are module loadable parameters. If
         * these parameters are not specified during load time, they
         * are initialized with default values.
         */
        config = &sp->config;
        mac_control = &sp->mac_control;

        config->napi = napi;
        config->tx_steering_type = tx_steering_type;

        /* Tx side parameters. */
        if (config->tx_steering_type == TX_PRIORITY_STEERING)
                config->tx_fifo_num = MAX_TX_FIFOS;
        else
                config->tx_fifo_num = tx_fifo_num;

        /* Initialize the fifos used for tx steering */
        if (config->tx_fifo_num < 5) {
                if (config->tx_fifo_num  == 1)
                        sp->total_tcp_fifos = 1;
                else
                        sp->total_tcp_fifos = config->tx_fifo_num - 1;
                sp->udp_fifo_idx = config->tx_fifo_num - 1;
                sp->total_udp_fifos = 1;
                sp->other_fifo_idx = sp->total_tcp_fifos - 1;
        } else {
                sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
                                       FIFO_OTHER_MAX_NUM);
                sp->udp_fifo_idx = sp->total_tcp_fifos;
                sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
                sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
        }

        config->multiq = dev_multiq;
        for (i = 0; i < config->tx_fifo_num; i++) {
                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

                tx_cfg->fifo_len = tx_fifo_len[i];
                tx_cfg->fifo_priority = i;
        }

        /* mapping the QoS priority to the configured fifos */
        for (i = 0; i < MAX_TX_FIFOS; i++)
                config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];

        /* map the hashing selector table to the configured fifos */
        for (i = 0; i < config->tx_fifo_num; i++)
                sp->fifo_selector[i] = fifo_selector[i];


        /* Per-list Tx interrupts are needed when any FIFO is short (< 65). */
        config->tx_intr_type = TXD_INT_TYPE_UTILZ;
        for (i = 0; i < config->tx_fifo_num; i++) {
                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

                tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
                if (tx_cfg->fifo_len < 65) {
                        config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
                        break;
                }
        }
        /* + 2 because one Txd for skb->data and one Txd for UFO */
        config->max_txds = MAX_SKB_FRAGS + 2;

        /* Rx side parameters. */
        config->rx_ring_num = rx_ring_num;
        for (i = 0; i < config->rx_ring_num; i++) {
                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
                struct ring_info *ring = &mac_control->rings[i];

                rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
                rx_cfg->ring_priority = i;
                ring->rx_bufs_left = 0;
                ring->rxd_mode = sp->rxd_mode;
                ring->rxd_count = rxd_count[sp->rxd_mode];
                ring->pdev = sp->pdev;
                ring->dev = sp->dev;
        }

        for (i = 0; i < rx_ring_num; i++) {
                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

                rx_cfg->ring_org = RING_ORG_BUFF1;
                rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
        }

        /*  Setting Mac Control parameters */
        mac_control->rmac_pause_time = rmac_pause_time;
        mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
        mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


        /*  initialize the shared memory used by the NIC and the host */
        if (init_shared_mem(sp)) {
                DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
                ret = -ENOMEM;
                goto mem_alloc_failed;
        }

        sp->bar0 = pci_ioremap_bar(pdev, 0);
        if (!sp->bar0) {
                DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
                          dev->name);
                ret = -ENOMEM;
                goto bar0_remap_failed;
        }

        sp->bar1 = pci_ioremap_bar(pdev, 2);
        if (!sp->bar1) {
                DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
                          dev->name);
                ret = -ENOMEM;
                goto bar1_remap_failed;
        }

        /* Initializing the BAR1 address as the start of the FIFO pointer. */
        for (j = 0; j < MAX_TX_FIFOS; j++) {
                mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
        }

        /*  Driver entry points */
        dev->netdev_ops = &s2io_netdev_ops;
        dev->ethtool_ops = &netdev_ethtool_ops;
        dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_RXCSUM | NETIF_F_LRO;
        dev->features |= dev->hw_features |
                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
        if (sp->high_dma_flag == true)
                dev->features |= NETIF_F_HIGHDMA;
        dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
        INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
        INIT_WORK(&sp->set_link_task, s2io_set_link);

        pci_save_state(sp->pdev);

        /* Setting swapper control on the NIC, for proper reset operation */
        if (s2io_set_swapper(sp)) {
                DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
                          dev->name);
                ret = -EAGAIN;
                goto set_swap_failed;
        }

        /* Verify if the Herc works on the slot its placed into */
        if (sp->device_type & XFRAME_II_DEVICE) {
                mode = s2io_verify_pci_mode(sp);
                if (mode < 0) {
                        DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
                                  __func__);
                        ret = -EBADSLT;
                        goto set_swap_failed;
                }
        }

        /* Probe MSI-X; on any failure fall back to INTA. */
        if (sp->config.intr_type == MSI_X) {
                sp->num_entries = config->rx_ring_num + 1;
                ret = s2io_enable_msi_x(sp);

                if (!ret) {
                        ret = s2io_test_msi(sp);
                        /* rollback MSI-X, will re-enable during add_isr() */
                        remove_msix_isr(sp);
                }
                if (ret) {

                        DBG_PRINT(ERR_DBG,
                                  "MSI-X requested but failed to enable\n");
                        sp->config.intr_type = INTA;
                }
        }

        /* One NAPI context per Rx ring for MSI-X, a single one for INTA. */
        if (config->intr_type ==  MSI_X) {
                for (i = 0; i < config->rx_ring_num ; i++) {
                        struct ring_info *ring = &mac_control->rings[i];

                        netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
                }
        } else {
                netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
        }

        /* Not needed for Herc */
        if (sp->device_type & XFRAME_I_DEVICE) {
                /*
                 * Fix for all "FFs" MAC address problems observed on
                 * Alpha platforms
                 */
                fix_mac_address(sp);
                s2io_reset(sp);
        }

        /*
         * MAC address initialization.
         * For now only one mac address will be read and used.
         */
        bar0 = sp->bar0;
        val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
        writeq(val64, &bar0->rmac_addr_cmd_mem);
        wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                              RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                              S2IO_BIT_RESET, true);
        tmp64 = readq(&bar0->rmac_addr_data0_mem);
        mac_down = (u32)tmp64;
        mac_up = (u32) (tmp64 >> 32);

        /* Unpack the six MAC address bytes from the 64-bit CAM data word. */
        sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
        sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
        sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
        sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
        sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
        sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

        /*  Set the factory defined MAC address initially   */
        dev->addr_len = ETH_ALEN;
        memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

        /* initialize number of multicast & unicast MAC entries variables */
        if (sp->device_type == XFRAME_I_DEVICE) {
                config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
                config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
                config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
        } else if (sp->device_type == XFRAME_II_DEVICE) {
                config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
                config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
                config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
        }

        /* MTU range: 46 - 9600 */
        dev->min_mtu = MIN_MTU;
        dev->max_mtu = S2IO_JUMBO_SIZE;

        /* store mac addresses from CAM to s2io_nic structure */
        do_s2io_store_unicast_mc(sp);

        /* Configure MSIX vector for number of rings configured plus one */
        if ((sp->device_type == XFRAME_II_DEVICE) &&
            (config->intr_type == MSI_X))
                sp->num_entries = config->rx_ring_num + 1;

        /* Store the values of the MSIX table in the s2io_nic structure */
        store_xmsi_data(sp);
        /* reset Nic and bring it to known state */
        s2io_reset(sp);

        /*
         * Initialize link state flags
         * and the card state parameter
         */
        sp->state = 0;

        /* Initialize spinlocks */
        for (i = 0; i < sp->config.tx_fifo_num; i++) {
                struct fifo_info *fifo = &mac_control->fifos[i];

                spin_lock_init(&fifo->tx_lock);
        }

        /*
         * SXE-002: Configure link and activity LED to init state
         * on driver load.
         */
        subid = sp->pdev->subsystem_device;
        if ((subid & 0xFF) >= 0x07) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
                val64 = readq(&bar0->gpio_control);
        }

        sp->rx_csum = 1;        /* Rx chksum verify enabled by default */

        if (register_netdev(dev)) {
                DBG_PRINT(ERR_DBG, "Device registration failed\n");
                ret = -ENODEV;
                goto register_failed;
        }
        s2io_vpd_read(sp);
        DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
        DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
                  sp->product_name, pdev->revision);
        DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
                  s2io_driver_version);
        DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
        DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
        if (sp->device_type & XFRAME_II_DEVICE) {
                mode = s2io_print_pci_mode(sp);
                if (mode < 0) {
                        ret = -EBADSLT;
                        unregister_netdev(dev);
                        goto set_swap_failed;
                }
        }
        switch (sp->rxd_mode) {
        case RXD_MODE_1:
                DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
                          dev->name);
                break;
        case RXD_MODE_3B:
                DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
                          dev->name);
                break;
        }

        switch (sp->config.napi) {
        case 0:
                DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
                break;
        case 1:
                DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
                break;
        }

        DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
                  sp->config.tx_fifo_num);

        DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
                  sp->config.rx_ring_num);

        switch (sp->config.intr_type) {
        case INTA:
                DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
                break;
        case MSI_X:
                DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
                break;
        }
        if (sp->config.multiq) {
                for (i = 0; i < sp->config.tx_fifo_num; i++) {
                        struct fifo_info *fifo = &mac_control->fifos[i];

                        fifo->multiq = config->multiq;
                }
                DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
                          dev->name);
        } else
                DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
                          dev->name);

        switch (sp->config.tx_steering_type) {
        case NO_STEERING:
                DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
                          dev->name);
                break;
        case TX_PRIORITY_STEERING:
                DBG_PRINT(ERR_DBG,
                          "%s: Priority steering enabled for transmit\n",
                          dev->name);
                break;
        case TX_DEFAULT_STEERING:
                DBG_PRINT(ERR_DBG,
                          "%s: Default steering enabled for transmit\n",
                          dev->name);
        }

        DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
                  dev->name);
        /* Initialize device name */
        snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
                 sp->product_name);

        if (vlan_tag_strip)
                sp->vlan_strip_flag = 1;
        else
                sp->vlan_strip_flag = 0;

        /*
         * Make Link state as off at this point, when the Link change
         * interrupt comes the state will be automatically changed to
         * the right state.
         */
        netif_carrier_off(dev);

        return 0;

        /* Error unwind: each label releases everything acquired before
         * the corresponding failure point, in reverse order.
         */
register_failed:
set_swap_failed:
        iounmap(sp->bar1);
bar1_remap_failed:
        iounmap(sp->bar0);
bar0_remap_failed:
mem_alloc_failed:
        free_shared_mem(sp);
        pci_disable_device(pdev);
        pci_release_regions(pdev);
        free_netdev(dev);

        return ret;
}
8133
8134/**
8135 * s2io_rem_nic - Free the PCI device
8136 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
8138 * PCI device and free up all resource held up by the device. This could
8139 * be in response to a Hot plug event or when the driver is to be removed
8140 * from memory.
8141 */
8142
8143static void s2io_rem_nic(struct pci_dev *pdev)
8144{
8145        struct net_device *dev = pci_get_drvdata(pdev);
8146        struct s2io_nic *sp;
8147
8148        if (dev == NULL) {
8149                DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8150                return;
8151        }
8152
8153        sp = netdev_priv(dev);
8154
8155        cancel_work_sync(&sp->rst_timer_task);
8156        cancel_work_sync(&sp->set_link_task);
8157
8158        unregister_netdev(dev);
8159
8160        free_shared_mem(sp);
8161        iounmap(sp->bar0);
8162        iounmap(sp->bar1);
8163        pci_release_regions(pdev);
8164        free_netdev(dev);
8165        pci_disable_device(pdev);
8166}
8167
/* Generate module init/exit that register/unregister s2io_driver with
 * the PCI core.
 */
module_pci_driver(s2io_driver);
8169
8170static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8171                                struct tcphdr **tcp, struct RxD_t *rxdp,
8172                                struct s2io_nic *sp)
8173{
8174        int ip_off;
8175        u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8176
8177        if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8178                DBG_PRINT(INIT_DBG,
8179                          "%s: Non-TCP frames not supported for LRO\n",
8180                          __func__);
8181                return -1;
8182        }
8183
8184        /* Checking for DIX type or DIX type with VLAN */
8185        if ((l2_type == 0) || (l2_type == 4)) {
8186                ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8187                /*
8188                 * If vlan stripping is disabled and the frame is VLAN tagged,
8189                 * shift the offset by the VLAN header size bytes.
8190                 */
8191                if ((!sp->vlan_strip_flag) &&
8192                    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8193                        ip_off += HEADER_VLAN_SIZE;
8194        } else {
8195                /* LLC, SNAP etc are considered non-mergeable */
8196                return -1;
8197        }
8198
8199        *ip = (struct iphdr *)(buffer + ip_off);
8200        ip_len = (u8)((*ip)->ihl);
8201        ip_len <<= 2;
8202        *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8203
8204        return 0;
8205}
8206
8207static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8208                                  struct tcphdr *tcp)
8209{
8210        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8211        if ((lro->iph->saddr != ip->saddr) ||
8212            (lro->iph->daddr != ip->daddr) ||
8213            (lro->tcph->source != tcp->source) ||
8214            (lro->tcph->dest != tcp->dest))
8215                return -1;
8216        return 0;
8217}
8218
8219static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8220{
8221        return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8222}
8223
8224static void initiate_new_session(struct lro *lro, u8 *l2h,
8225                                 struct iphdr *ip, struct tcphdr *tcp,
8226                                 u32 tcp_pyld_len, u16 vlan_tag)
8227{
8228        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8229        lro->l2h = l2h;
8230        lro->iph = ip;
8231        lro->tcph = tcp;
8232        lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8233        lro->tcp_ack = tcp->ack_seq;
8234        lro->sg_num = 1;
8235        lro->total_len = ntohs(ip->tot_len);
8236        lro->frags_len = 0;
8237        lro->vlan_tag = vlan_tag;
8238        /*
8239         * Check if we saw TCP timestamp.
8240         * Other consistency checks have already been done.
8241         */
8242        if (tcp->doff == 8) {
8243                __be32 *ptr;
8244                ptr = (__be32 *)(tcp+1);
8245                lro->saw_ts = 1;
8246                lro->cur_tsval = ntohl(*(ptr+1));
8247                lro->cur_tsecr = *(ptr+2);
8248        }
8249        lro->in_use = 1;
8250}
8251
8252static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8253{
8254        struct iphdr *ip = lro->iph;
8255        struct tcphdr *tcp = lro->tcph;
8256        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8257
8258        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8259
8260        /* Update L3 header */
8261        csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8262        ip->tot_len = htons(lro->total_len);
8263
8264        /* Update L4 header */
8265        tcp->ack_seq = lro->tcp_ack;
8266        tcp->window = lro->window;
8267
8268        /* Update tsecr field if this session has timestamps enabled */
8269        if (lro->saw_ts) {
8270                __be32 *ptr = (__be32 *)(tcp + 1);
8271                *(ptr+2) = lro->cur_tsecr;
8272        }
8273
8274        /* Update counters required for calculation of
8275         * average no. of packets aggregated.
8276         */
8277        swstats->sum_avg_pkts_aggregated += lro->sg_num;
8278        swstats->num_aggregations++;
8279}
8280
8281static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8282                             struct tcphdr *tcp, u32 l4_pyld)
8283{
8284        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8285        lro->total_len += l4_pyld;
8286        lro->frags_len += l4_pyld;
8287        lro->tcp_next_seq += l4_pyld;
8288        lro->sg_num++;
8289
8290        /* Update ack seq no. and window ad(from this pkt) in LRO object */
8291        lro->tcp_ack = tcp->ack_seq;
8292        lro->window = tcp->window;
8293
8294        if (lro->saw_ts) {
8295                __be32 *ptr;
8296                /* Update tsecr and tsval from this packet */
8297                ptr = (__be32 *)(tcp+1);
8298                lro->cur_tsval = ntohl(*(ptr+1));
8299                lro->cur_tsecr = *(ptr + 2);
8300        }
8301}
8302
/*
 * Decide whether a TCP segment is eligible for LRO aggregation at the
 * L3/L4 level. @l_lro is the candidate session (NULL when the segment
 * is being considered for a brand-new session). Returns 0 if the
 * segment may be merged, -1 if it must be sent up unmerged.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
                                    struct tcphdr *tcp, u32 tcp_pyld_len)
{
        u8 *ptr;

        DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

        if (!tcp_pyld_len) {
                /* Runt frame or a pure ack */
                return -1;
        }

        if (ip->ihl != 5) /* IP has options */
                return -1;

        /* If we see CE codepoint in IP header, packet is not mergeable */
        if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
                return -1;

        /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
        if (tcp->urg || tcp->psh || tcp->rst ||
            tcp->syn || tcp->fin ||
            tcp->ece || tcp->cwr || !tcp->ack) {
                /*
                 * Currently recognize only the ack control word and
                 * any other control field being set would result in
                 * flushing the LRO session
                 */
                return -1;
        }

        /*
         * Allow only one TCP timestamp option. Don't aggregate if
         * any other options are detected.
         * doff == 5 is a bare header; doff == 8 is header + 12 option
         * bytes, the size of NOP/NOP/timestamp.
         */
        if (tcp->doff != 5 && tcp->doff != 8)
                return -1;

        if (tcp->doff == 8) {
                /* Skip leading NOP padding to reach the timestamp option.
                 * NOTE(review): the walk is unbounded but doff == 8 limits
                 * the option area to 12 bytes of sane input - confirm the
                 * HW guarantees the option bytes are present.
                 */
                ptr = (u8 *)(tcp + 1);
                while (*ptr == TCPOPT_NOP)
                        ptr++;
                if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
                        return -1;

                /* Ensure timestamp value increases monotonically
                 * NOTE(review): plain '>' does not account for tsval
                 * wraparound (RFC 7323 PAWS uses modular comparison) -
                 * a wrapped flow would be flushed rather than merged,
                 * which is safe but suboptimal; confirm intended.
                 */
                if (l_lro)
                        if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
                                return -1;

                /* timestamp echo reply should be non-zero */
                if (*((__be32 *)(ptr+6)) == 0)
                        return -1;
        }

        return 0;
}
8360
/*
 * Classify a received TCP segment against the per-ring LRO session table
 * and perform the corresponding session bookkeeping.
 *
 * On return, *tcp points at the TCP header inside @buffer, *tcp_len is the
 * TCP payload length, and *lro is the matched/created session (NULL when
 * the table is full). Return codes, consumed by the rx path:
 *   0 - all sessions in use, send the packet up unmerged
 *   1 - segment was aggregated into an existing session
 *   2 - out-of-sequence or non-mergeable segment: flush session and packet
 *   3 - a new session was initiated for this segment
 *   4 - aggregated AND session reached lro_max_aggr_per_sess: flush it
 *   5 - segment is not L3/L4 aggregatable, send it up without a session
 *   (negative: frame failed the L2 check and must be sent up as-is)
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
                                 u8 **tcp, u32 *tcp_len, struct lro **lro,
                                 struct RxD_t *rxdp, struct s2io_nic *sp)
{
        struct iphdr *ip;
        struct tcphdr *tcph;
        int ret = 0, i;
        u16 vlan_tag = 0;
        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

        /* Locate the IP/TCP headers; bail out for non-TCP/non-DIX frames. */
        ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
                                   rxdp, sp);
        if (ret)
                return ret;

        DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

        vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
        tcph = (struct tcphdr *)*tcp;
        *tcp_len = get_l4_pyld_length(ip, tcph);
        /* First pass: look for an in-use session matching this 4-tuple. */
        for (i = 0; i < MAX_LRO_SESSIONS; i++) {
                struct lro *l_lro = &ring_data->lro0_n[i];
                if (l_lro->in_use) {
                        if (check_for_socket_match(l_lro, ip, tcph))
                                continue;
                        /* Sock pair matched */
                        *lro = l_lro;

                        if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
                                DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
                                          "expected 0x%x, actual 0x%x\n",
                                          __func__,
                                          (*lro)->tcp_next_seq,
                                          ntohl(tcph->seq));

                                swstats->outof_sequence_pkts++;
                                ret = 2;
                                break;
                        }

                        if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
                                                      *tcp_len))
                                ret = 1; /* Aggregate */
                        else
                                ret = 2; /* Flush both */
                        break;
                }
        }

        if (ret == 0) {
                /* Before searching for available LRO objects,
                 * check if the pkt is L3/L4 aggregatable. If not
                 * don't create new LRO session. Just send this
                 * packet up.
                 */
                if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
                        return 5;

                /* Second pass: claim the first free session slot. */
                for (i = 0; i < MAX_LRO_SESSIONS; i++) {
                        struct lro *l_lro = &ring_data->lro0_n[i];
                        if (!(l_lro->in_use)) {
                                *lro = l_lro;
                                ret = 3; /* Begin anew */
                                break;
                        }
                }
        }

        if (ret == 0) { /* sessions exceeded */
                DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
                          __func__);
                *lro = NULL;
                return ret;
        }

        /* Act on the classification decided above. */
        switch (ret) {
        case 3:
                initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
                                     vlan_tag);
                break;
        case 2:
                /* Finalize the session's headers before the caller flushes. */
                update_L3L4_header(sp, *lro);
                break;
        case 1:
                aggregate_new_rx(*lro, ip, tcph, *tcp_len);
                if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
                        update_L3L4_header(sp, *lro);
                        ret = 4; /* Flush the LRO */
                }
                break;
        default:
                DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
                break;
        }

        return ret;
}
8458
8459static void clear_lro_session(struct lro *lro)
8460{
8461        static u16 lro_struct_size = sizeof(struct lro);
8462
8463        memset(lro, 0, lro_struct_size);
8464}
8465
8466static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8467{
8468        struct net_device *dev = skb->dev;
8469        struct s2io_nic *sp = netdev_priv(dev);
8470
8471        skb->protocol = eth_type_trans(skb, dev);
8472        if (vlan_tag && sp->vlan_strip_flag)
8473                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8474        if (sp->config.napi)
8475                netif_receive_skb(skb);
8476        else
8477                netif_rx(skb);
8478}
8479
8480static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8481                           struct sk_buff *skb, u32 tcp_len)
8482{
8483        struct sk_buff *first = lro->parent;
8484        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8485
8486        first->len += tcp_len;
8487        first->data_len = lro->frags_len;
8488        skb_pull(skb, (skb->len - tcp_len));
8489        if (skb_shinfo(first)->frag_list)
8490                lro->last_frag->next = skb;
8491        else
8492                skb_shinfo(first)->frag_list = skb;
8493        first->truesize += skb->truesize;
8494        lro->last_frag = skb;
8495        swstats->clubbed_frms_cnt++;
8496}
8497
8498/**
8499 * s2io_io_error_detected - called when PCI error is detected
8500 * @pdev: Pointer to PCI device
8501 * @state: The current pci connection state
8502 *
8503 * This function is called after a PCI bus error affecting
8504 * this device has been detected.
8505 */
8506static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8507                                               pci_channel_state_t state)
8508{
8509        struct net_device *netdev = pci_get_drvdata(pdev);
8510        struct s2io_nic *sp = netdev_priv(netdev);
8511
8512        netif_device_detach(netdev);
8513
8514        if (state == pci_channel_io_perm_failure)
8515                return PCI_ERS_RESULT_DISCONNECT;
8516
8517        if (netif_running(netdev)) {
8518                /* Bring down the card, while avoiding PCI I/O */
8519                do_s2io_card_down(sp, 0);
8520        }
8521        pci_disable_device(pdev);
8522
8523        return PCI_ERS_RESULT_NEED_RESET;
8524}
8525
8526/**
8527 * s2io_io_slot_reset - called after the pci bus has been reset.
8528 * @pdev: Pointer to PCI device
8529 *
8530 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
8532 * followed by fixups by BIOS, and has its config space
8533 * set up identically to what it was at cold boot.
8534 */
8535static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8536{
8537        struct net_device *netdev = pci_get_drvdata(pdev);
8538        struct s2io_nic *sp = netdev_priv(netdev);
8539
8540        if (pci_enable_device(pdev)) {
8541                pr_err("Cannot re-enable PCI device after reset.\n");
8542                return PCI_ERS_RESULT_DISCONNECT;
8543        }
8544
8545        pci_set_master(pdev);
8546        s2io_reset(sp);
8547
8548        return PCI_ERS_RESULT_RECOVERED;
8549}
8550
8551/**
8552 * s2io_io_resume - called when traffic can start flowing again.
8553 * @pdev: Pointer to PCI device
8554 *
8555 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
8557 */
8558static void s2io_io_resume(struct pci_dev *pdev)
8559{
8560        struct net_device *netdev = pci_get_drvdata(pdev);
8561        struct s2io_nic *sp = netdev_priv(netdev);
8562
8563        if (netif_running(netdev)) {
8564                if (s2io_card_up(sp)) {
8565                        pr_err("Can't bring device back up after reset.\n");
8566                        return;
8567                }
8568
8569                if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
8570                        s2io_card_down(sp);
8571                        pr_err("Can't restore mac addr after reset.\n");
8572                        return;
8573                }
8574        }
8575
8576        netif_device_attach(netdev);
8577        netif_tx_wake_all_queues(netdev);
8578}
8579