linux/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
   1/*******************************************************************************
   2
   3  Intel 10 Gigabit PCI Express Linux driver
   4  Copyright(c) 1999 - 2016 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  Linux NICS <linux.nics@intel.com>
  24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  26
  27*******************************************************************************/
  28
  29#include <linux/pci.h>
  30#include <linux/delay.h>
  31#include <linux/sched.h>
  32#include <linux/netdevice.h>
  33
  34#include "ixgbe.h"
  35#include "ixgbe_common.h"
  36#include "ixgbe_phy.h"
  37
  38static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
  39static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
  40static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
  41static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
  42static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
  43static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
  44                                        u16 count);
  45static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
  46static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
  47static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
  48static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
  49
  50static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
  51static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
  52static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
  53                                             u16 words, u16 *data);
  54static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
  55                                             u16 words, u16 *data);
  56static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
  57                                                 u16 offset);
  58static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
  59
   60/* Base table for register values that change by MAC */
  61const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
  62        IXGBE_MVALS_INIT(8259X)
  63};
  64
  65/**
  66 *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
  67 *  control
  68 *  @hw: pointer to hardware structure
  69 *
  70 *  There are several phys that do not support autoneg flow control. This
   71 *  function checks the device id to see if the associated phy supports
  72 *  autoneg flow control.
  73 **/
  74bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
  75{
  76        bool supported = false;
  77        ixgbe_link_speed speed;
  78        bool link_up;
  79
  80        switch (hw->phy.media_type) {
  81        case ixgbe_media_type_fiber:
  82                hw->mac.ops.check_link(hw, &speed, &link_up, false);
  83                /* if link is down, assume supported */
  84                if (link_up)
   85                        supported = speed == IXGBE_LINK_SPEED_1GB_FULL;
  87                else
  88                        supported = true;
  89                break;
  90        case ixgbe_media_type_backplane:
  91                supported = true;
  92                break;
  93        case ixgbe_media_type_copper:
  94                /* only some copper devices support flow control autoneg */
  95                switch (hw->device_id) {
  96                case IXGBE_DEV_ID_82599_T3_LOM:
  97                case IXGBE_DEV_ID_X540T:
  98                case IXGBE_DEV_ID_X540T1:
  99                case IXGBE_DEV_ID_X550T:
 100                case IXGBE_DEV_ID_X550T1:
 101                case IXGBE_DEV_ID_X550EM_X_10G_T:
 102                        supported = true;
 103                        break;
 104                default:
 105                        break;
 106                }
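                     /* fall through */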
 107        default:
 108                break;
 109        }
 110
 111        return supported;
 112}
 113
 114/**
 115 *  ixgbe_setup_fc_generic - Set up flow control
 116 *  @hw: pointer to hardware structure
 117 *
 118 *  Called at init time to set up flow control.
 119 **/
 120s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
 121{
 122        s32 ret_val = 0;
 123        u32 reg = 0, reg_bp = 0;
 124        u16 reg_cu = 0;
 125        bool locked = false;
 126
 127        /*
 128         * Validate the requested mode.  Strict IEEE mode does not allow
 129         * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
 130         */
 131        if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
 132                hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
 133                return IXGBE_ERR_INVALID_LINK_SETTINGS;
 134        }
 135
 136        /*
 137         * 10gig parts do not have a word in the EEPROM to determine the
 138         * default flow control setting, so we explicitly set it to full.
 139         */
 140        if (hw->fc.requested_mode == ixgbe_fc_default)
 141                hw->fc.requested_mode = ixgbe_fc_full;
 142
 143        /*
 144         * Set up the 1G and 10G flow control advertisement registers so the
 145         * HW will be able to do fc autoneg once the cable is plugged in.  If
 146         * we link at 10G, the 1G advertisement is harmless and vice versa.
 147         */
 148        switch (hw->phy.media_type) {
 149        case ixgbe_media_type_backplane:
  150                /* some MACs need RMW protection on AUTOC */
 151                ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
 152                if (ret_val)
 153                        return ret_val;
 154
  155                /* only backplane uses autoc so fall through */
 156        case ixgbe_media_type_fiber:
 157                reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
 158
 159                break;
 160        case ixgbe_media_type_copper:
 161                hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
 162                                        MDIO_MMD_AN, &reg_cu);
 163                break;
 164        default:
 165                break;
 166        }
 167
 168        /*
 169         * The possible values of fc.requested_mode are:
 170         * 0: Flow control is completely disabled
 171         * 1: Rx flow control is enabled (we can receive pause frames,
 172         *    but not send pause frames).
 173         * 2: Tx flow control is enabled (we can send pause frames but
 174         *    we do not support receiving pause frames).
 175         * 3: Both Rx and Tx flow control (symmetric) are enabled.
 176         * other: Invalid.
 177         */
 178        switch (hw->fc.requested_mode) {
 179        case ixgbe_fc_none:
 180                /* Flow control completely disabled by software override. */
 181                reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
 182                if (hw->phy.media_type == ixgbe_media_type_backplane)
 183                        reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
 184                                    IXGBE_AUTOC_ASM_PAUSE);
 185                else if (hw->phy.media_type == ixgbe_media_type_copper)
 186                        reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
 187                break;
 188        case ixgbe_fc_tx_pause:
 189                /*
 190                 * Tx Flow control is enabled, and Rx Flow control is
 191                 * disabled by software override.
 192                 */
 193                reg |= IXGBE_PCS1GANA_ASM_PAUSE;
 194                reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
 195                if (hw->phy.media_type == ixgbe_media_type_backplane) {
 196                        reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
 197                        reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
 198                } else if (hw->phy.media_type == ixgbe_media_type_copper) {
 199                        reg_cu |= IXGBE_TAF_ASM_PAUSE;
 200                        reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
 201                }
 202                break;
 203        case ixgbe_fc_rx_pause:
 204                /*
 205                 * Rx Flow control is enabled and Tx Flow control is
 206                 * disabled by software override. Since there really
 207                 * isn't a way to advertise that we are capable of RX
 208                 * Pause ONLY, we will advertise that we support both
 209                 * symmetric and asymmetric Rx PAUSE, as such we fall
 210                 * through to the fc_full statement.  Later, we will
 211                 * disable the adapter's ability to send PAUSE frames.
 212                 */
 213        case ixgbe_fc_full:
 214                /* Flow control (both Rx and Tx) is enabled by SW override. */
 215                reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
 216                if (hw->phy.media_type == ixgbe_media_type_backplane)
 217                        reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
 218                                  IXGBE_AUTOC_ASM_PAUSE;
 219                else if (hw->phy.media_type == ixgbe_media_type_copper)
 220                        reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
 221                break;
 222        default:
 223                hw_dbg(hw, "Flow control param set incorrectly\n");
 224                return IXGBE_ERR_CONFIG;
 225        }
 226
 227        if (hw->mac.type != ixgbe_mac_X540) {
 228                /*
 229                 * Enable auto-negotiation between the MAC & PHY;
 230                 * the MAC will advertise clause 37 flow control.
 231                 */
 232                IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
 233                reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
 234
 235                /* Disable AN timeout */
 236                if (hw->fc.strict_ieee)
 237                        reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
 238
 239                IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
 240                hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
 241        }
 242
 243        /*
 244         * AUTOC restart handles negotiation of 1G and 10G on backplane
 245         * and copper. There is no need to set the PCS1GCTL register.
 246         *
 247         */
 248        if (hw->phy.media_type == ixgbe_media_type_backplane) {
 249                /* Need the SW/FW semaphore around AUTOC writes if 82599 and
  250                 * LESM is on; likewise reset_pipeline requires the lock as
 251                 * it also writes AUTOC.
 252                 */
 253                ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
 254                if (ret_val)
 255                        return ret_val;
 256
 257        } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
 258                   ixgbe_device_supports_autoneg_fc(hw)) {
 259                hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
 260                                      MDIO_MMD_AN, reg_cu);
 261        }
 262
 263        hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
 264        return ret_val;
 265}
 266
 267/**
 268 *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 269 *  @hw: pointer to hardware structure
 270 *
  271 *  Starts the hardware by filling the bus info structure and media type,
  272 *  clearing all on-chip counters, initializing the receive address registers,
  273 *  multicast table, and VLAN filter table, setting up link and flow control,
  274 *  and leaving the transmit and receive units disabled and uninitialized.
 275 **/
 276s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
 277{
 278        s32 ret_val;
 279        u32 ctrl_ext;
 280
 281        /* Set the media type */
 282        hw->phy.media_type = hw->mac.ops.get_media_type(hw);
 283
 284        /* Identify the PHY */
 285        hw->phy.ops.identify(hw);
 286
 287        /* Clear the VLAN filter table */
 288        hw->mac.ops.clear_vfta(hw);
 289
 290        /* Clear statistics registers */
 291        hw->mac.ops.clear_hw_cntrs(hw);
 292
 293        /* Set No Snoop Disable */
 294        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
 295        ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
 296        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
 297        IXGBE_WRITE_FLUSH(hw);
 298
 299        /* Setup flow control */
 300        ret_val = hw->mac.ops.setup_fc(hw);
 301        if (ret_val)
 302                return ret_val;
 303
 304        /* Clear adapter stopped flag */
 305        hw->adapter_stopped = false;
 306
 307        return 0;
 308}
 309
 310/**
 311 *  ixgbe_start_hw_gen2 - Init sequence for common device family
 312 *  @hw: pointer to hw structure
 313 *
 314 * Performs the init sequence common to the second generation
 315 * of 10 GbE devices.
 316 * Devices in the second generation:
 317 *     82599
 318 *     X540
 319 **/
 320s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
 321{
 322        u32 i;
 323
 324        /* Clear the rate limiters */
 325        for (i = 0; i < hw->mac.max_tx_queues; i++) {
 326                IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
 327                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
 328        }
 329        IXGBE_WRITE_FLUSH(hw);
 330
 331#ifndef CONFIG_SPARC
 332        /* Disable relaxed ordering */
 333        for (i = 0; i < hw->mac.max_tx_queues; i++) {
 334                u32 regval;
 335
 336                regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
 337                regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
 338                IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
 339        }
 340
 341        for (i = 0; i < hw->mac.max_rx_queues; i++) {
 342                u32 regval;
 343
 344                regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
 345                regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
 346                            IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
 347                IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
 348        }
 349#endif
 350        return 0;
 351}
 352
 353/**
 354 *  ixgbe_init_hw_generic - Generic hardware initialization
 355 *  @hw: pointer to hardware structure
 356 *
  357 *  Initializes the hardware by resetting the hardware, filling the bus info
  358 *  structure and media type, clearing all on-chip counters, initializing the
  359 *  receive address registers, multicast table, and VLAN filter table, setting
  360 *  up link and flow control, and leaving the transmit and receive units
  361 *  disabled and uninitialized.
 362 **/
 363s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
 364{
 365        s32 status;
 366
 367        /* Reset the hardware */
 368        status = hw->mac.ops.reset_hw(hw);
 369
 370        if (status == 0) {
 371                /* Start the HW */
 372                status = hw->mac.ops.start_hw(hw);
 373        }
 374
 375        return status;
 376}
 377
 378/**
 379 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 380 *  @hw: pointer to hardware structure
 381 *
  382 *  Clears all hardware statistics counters by reading them from the hardware.
 383 *  Statistics counters are clear on read.
 384 **/
 385s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
 386{
 387        u16 i = 0;
 388
 389        IXGBE_READ_REG(hw, IXGBE_CRCERRS);
 390        IXGBE_READ_REG(hw, IXGBE_ILLERRC);
 391        IXGBE_READ_REG(hw, IXGBE_ERRBC);
 392        IXGBE_READ_REG(hw, IXGBE_MSPDC);
 393        for (i = 0; i < 8; i++)
 394                IXGBE_READ_REG(hw, IXGBE_MPC(i));
 395
 396        IXGBE_READ_REG(hw, IXGBE_MLFC);
 397        IXGBE_READ_REG(hw, IXGBE_MRFC);
 398        IXGBE_READ_REG(hw, IXGBE_RLEC);
 399        IXGBE_READ_REG(hw, IXGBE_LXONTXC);
 400        IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
 401        if (hw->mac.type >= ixgbe_mac_82599EB) {
 402                IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
 403                IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
 404        } else {
 405                IXGBE_READ_REG(hw, IXGBE_LXONRXC);
 406                IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
 407        }
 408
 409        for (i = 0; i < 8; i++) {
 410                IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
 411                IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
 412                if (hw->mac.type >= ixgbe_mac_82599EB) {
 413                        IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
 414                        IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
 415                } else {
 416                        IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
 417                        IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
 418                }
 419        }
 420        if (hw->mac.type >= ixgbe_mac_82599EB)
 421                for (i = 0; i < 8; i++)
 422                        IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
 423        IXGBE_READ_REG(hw, IXGBE_PRC64);
 424        IXGBE_READ_REG(hw, IXGBE_PRC127);
 425        IXGBE_READ_REG(hw, IXGBE_PRC255);
 426        IXGBE_READ_REG(hw, IXGBE_PRC511);
 427        IXGBE_READ_REG(hw, IXGBE_PRC1023);
 428        IXGBE_READ_REG(hw, IXGBE_PRC1522);
 429        IXGBE_READ_REG(hw, IXGBE_GPRC);
 430        IXGBE_READ_REG(hw, IXGBE_BPRC);
 431        IXGBE_READ_REG(hw, IXGBE_MPRC);
 432        IXGBE_READ_REG(hw, IXGBE_GPTC);
 433        IXGBE_READ_REG(hw, IXGBE_GORCL);
 434        IXGBE_READ_REG(hw, IXGBE_GORCH);
 435        IXGBE_READ_REG(hw, IXGBE_GOTCL);
 436        IXGBE_READ_REG(hw, IXGBE_GOTCH);
 437        if (hw->mac.type == ixgbe_mac_82598EB)
 438                for (i = 0; i < 8; i++)
 439                        IXGBE_READ_REG(hw, IXGBE_RNBC(i));
 440        IXGBE_READ_REG(hw, IXGBE_RUC);
 441        IXGBE_READ_REG(hw, IXGBE_RFC);
 442        IXGBE_READ_REG(hw, IXGBE_ROC);
 443        IXGBE_READ_REG(hw, IXGBE_RJC);
 444        IXGBE_READ_REG(hw, IXGBE_MNGPRC);
 445        IXGBE_READ_REG(hw, IXGBE_MNGPDC);
 446        IXGBE_READ_REG(hw, IXGBE_MNGPTC);
 447        IXGBE_READ_REG(hw, IXGBE_TORL);
 448        IXGBE_READ_REG(hw, IXGBE_TORH);
 449        IXGBE_READ_REG(hw, IXGBE_TPR);
 450        IXGBE_READ_REG(hw, IXGBE_TPT);
 451        IXGBE_READ_REG(hw, IXGBE_PTC64);
 452        IXGBE_READ_REG(hw, IXGBE_PTC127);
 453        IXGBE_READ_REG(hw, IXGBE_PTC255);
 454        IXGBE_READ_REG(hw, IXGBE_PTC511);
 455        IXGBE_READ_REG(hw, IXGBE_PTC1023);
 456        IXGBE_READ_REG(hw, IXGBE_PTC1522);
 457        IXGBE_READ_REG(hw, IXGBE_MPTC);
 458        IXGBE_READ_REG(hw, IXGBE_BPTC);
 459        for (i = 0; i < 16; i++) {
 460                IXGBE_READ_REG(hw, IXGBE_QPRC(i));
 461                IXGBE_READ_REG(hw, IXGBE_QPTC(i));
 462                if (hw->mac.type >= ixgbe_mac_82599EB) {
 463                        IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
 464                        IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
 465                        IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
 466                        IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
 467                        IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 468                } else {
 469                        IXGBE_READ_REG(hw, IXGBE_QBRC(i));
 470                        IXGBE_READ_REG(hw, IXGBE_QBTC(i));
 471                }
 472        }
 473
 474        if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
 475                if (hw->phy.id == 0)
 476                        hw->phy.ops.identify(hw);
 477                hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
 478                hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
 479                hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
 480                hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
 481        }
 482
 483        return 0;
 484}
 485
 486/**
 487 *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 488 *  @hw: pointer to hardware structure
 489 *  @pba_num: stores the part number string from the EEPROM
 490 *  @pba_num_size: part number string buffer length
 491 *
 492 *  Reads the part number string from the EEPROM.
 493 **/
 494s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
 495                                  u32 pba_num_size)
 496{
 497        s32 ret_val;
 498        u16 data;
 499        u16 pba_ptr;
 500        u16 offset;
 501        u16 length;
 502
 503        if (pba_num == NULL) {
 504                hw_dbg(hw, "PBA string buffer was null\n");
 505                return IXGBE_ERR_INVALID_ARGUMENT;
 506        }
 507
 508        ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
 509        if (ret_val) {
 510                hw_dbg(hw, "NVM Read Error\n");
 511                return ret_val;
 512        }
 513
 514        ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
 515        if (ret_val) {
 516                hw_dbg(hw, "NVM Read Error\n");
 517                return ret_val;
 518        }
 519
 520        /*
  521         * If data is not the ptr guard, the PBA must be in legacy format,
  522         * which means pba_ptr is actually our second data word for the PBA
  523         * number and we can decode it into an ASCII string.
 524         */
 525        if (data != IXGBE_PBANUM_PTR_GUARD) {
 526                hw_dbg(hw, "NVM PBA number is not stored as string\n");
 527
 528                /* we will need 11 characters to store the PBA */
 529                if (pba_num_size < 11) {
 530                        hw_dbg(hw, "PBA string buffer too small\n");
 531                        return IXGBE_ERR_NO_SPACE;
 532                }
 533
 534                /* extract hex string from data and pba_ptr */
 535                pba_num[0] = (data >> 12) & 0xF;
 536                pba_num[1] = (data >> 8) & 0xF;
 537                pba_num[2] = (data >> 4) & 0xF;
 538                pba_num[3] = data & 0xF;
 539                pba_num[4] = (pba_ptr >> 12) & 0xF;
 540                pba_num[5] = (pba_ptr >> 8) & 0xF;
  541                pba_num[6] = '-';
  542                pba_num[7] = (pba_ptr >> 4) & 0xF;
  543                pba_num[8] = pba_ptr & 0xF;
  544
  545                /* put a null character on the end of our string */
  546                pba_num[9] = '\0';
  547
  548                /* switch all the data but the '-' to hex char */
  549                for (offset = 0; offset < 9; offset++) {
 551                        if (pba_num[offset] < 0xA)
 552                                pba_num[offset] += '0';
 553                        else if (pba_num[offset] < 0x10)
 554                                pba_num[offset] += 'A' - 0xA;
 555                }
 556
 557                return 0;
 558        }
 559
 560        ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
 561        if (ret_val) {
 562                hw_dbg(hw, "NVM Read Error\n");
 563                return ret_val;
 564        }
 565
 566        if (length == 0xFFFF || length == 0) {
 567                hw_dbg(hw, "NVM PBA number section invalid length\n");
 568                return IXGBE_ERR_PBA_SECTION;
 569        }
 570
 571        /* check if pba_num buffer is big enough */
 572        if (pba_num_size  < (((u32)length * 2) - 1)) {
 573                hw_dbg(hw, "PBA string buffer too small\n");
 574                return IXGBE_ERR_NO_SPACE;
 575        }
 576
 577        /* trim pba length from start of string */
 578        pba_ptr++;
 579        length--;
 580
 581        for (offset = 0; offset < length; offset++) {
 582                ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
 583                if (ret_val) {
 584                        hw_dbg(hw, "NVM Read Error\n");
 585                        return ret_val;
 586                }
 587                pba_num[offset * 2] = (u8)(data >> 8);
 588                pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
 589        }
 590        pba_num[offset * 2] = '\0';
 591
 592        return 0;
 593}
 594
 595/**
 596 *  ixgbe_get_mac_addr_generic - Generic get MAC address
 597 *  @hw: pointer to hardware structure
 598 *  @mac_addr: Adapter MAC address
 599 *
  600 *  Reads the adapter's MAC address from the first Receive Address Register
  601 *  (RAR0). A reset of the adapter must be performed prior to calling this
  602 *  function so that the MAC address has been loaded from the EEPROM into RAR0.
 603 **/
 604s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
 605{
 606        u32 rar_high;
 607        u32 rar_low;
 608        u16 i;
 609
 610        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
 611        rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
 612
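             /* RAL(0) holds bytes 0-3 of the address, RAH(0) bits 15:0 hold bytes 4-5 */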
 613        for (i = 0; i < 4; i++)
 614                mac_addr[i] = (u8)(rar_low >> (i*8));
 615
 616        for (i = 0; i < 2; i++)
 617                mac_addr[i+4] = (u8)(rar_high >> (i*8));
 618
 619        return 0;
 620}
 621
 622enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status)
 623{
 624        switch (link_status & IXGBE_PCI_LINK_WIDTH) {
 625        case IXGBE_PCI_LINK_WIDTH_1:
 626                return ixgbe_bus_width_pcie_x1;
 627        case IXGBE_PCI_LINK_WIDTH_2:
 628                return ixgbe_bus_width_pcie_x2;
 629        case IXGBE_PCI_LINK_WIDTH_4:
 630                return ixgbe_bus_width_pcie_x4;
 631        case IXGBE_PCI_LINK_WIDTH_8:
 632                return ixgbe_bus_width_pcie_x8;
 633        default:
 634                return ixgbe_bus_width_unknown;
 635        }
 636}
 637
 638enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
 639{
 640        switch (link_status & IXGBE_PCI_LINK_SPEED) {
 641        case IXGBE_PCI_LINK_SPEED_2500:
 642                return ixgbe_bus_speed_2500;
 643        case IXGBE_PCI_LINK_SPEED_5000:
 644                return ixgbe_bus_speed_5000;
 645        case IXGBE_PCI_LINK_SPEED_8000:
 646                return ixgbe_bus_speed_8000;
 647        default:
 648                return ixgbe_bus_speed_unknown;
 649        }
 650}
 651
 652/**
 653 *  ixgbe_get_bus_info_generic - Generic set PCI bus info
 654 *  @hw: pointer to hardware structure
 655 *
 656 *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
 657 **/
 658s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
 659{
 660        u16 link_status;
 661
 662        hw->bus.type = ixgbe_bus_type_pci_express;
 663
 664        /* Get the negotiated link width and speed from PCI config space */
 665        link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
 666
 667        hw->bus.width = ixgbe_convert_bus_width(link_status);
 668        hw->bus.speed = ixgbe_convert_bus_speed(link_status);
 669
 670        hw->mac.ops.set_lan_id(hw);
 671
 672        return 0;
 673}
 674
 675/**
 676 *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 677 *  @hw: pointer to the HW structure
 678 *
 679 *  Determines the LAN function id by reading memory-mapped registers
 680 *  and swaps the port value if requested.
 681 **/
 682void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
 683{
 684        struct ixgbe_bus_info *bus = &hw->bus;
 685        u16 ee_ctrl_4;
 686        u32 reg;
 687
 688        reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
 689        bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
 690        bus->lan_id = bus->func;
 691
 692        /* check for a port swap */
 693        reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
 694        if (reg & IXGBE_FACTPS_LFS)
 695                bus->func ^= 0x1;
 696
 697        /* Get MAC instance from EEPROM for configuring CS4227 */
 698        if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
 699                hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
 700                bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
 701                                   IXGBE_EE_CTRL_4_INST_ID_SHIFT;
 702        }
 703}
 704
 705/**
 706 *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 707 *  @hw: pointer to hardware structure
 708 *
 709 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 710 *  disables transmit and receive units. The adapter_stopped flag is used by
 711 *  the shared code and drivers to determine if the adapter is in a stopped
 712 *  state and should not touch the hardware.
 713 **/
 714s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
 715{
 716        u32 reg_val;
 717        u16 i;
 718
 719        /*
 720         * Set the adapter_stopped flag so other driver functions stop touching
 721         * the hardware
 722         */
 723        hw->adapter_stopped = true;
 724
 725        /* Disable the receive unit */
 726        hw->mac.ops.disable_rx(hw);
 727
 728        /* Clear interrupt mask to stop interrupts from being generated */
 729        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
 730
 731        /* Clear any pending interrupts, flush previous writes */
 732        IXGBE_READ_REG(hw, IXGBE_EICR);
 733
 734        /* Disable the transmit unit.  Each queue must be disabled. */
 735        for (i = 0; i < hw->mac.max_tx_queues; i++)
 736                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
 737
 738        /* Disable the receive unit by stopping each queue */
 739        for (i = 0; i < hw->mac.max_rx_queues; i++) {
 740                reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
 741                reg_val &= ~IXGBE_RXDCTL_ENABLE;
 742                reg_val |= IXGBE_RXDCTL_SWFLSH;
 743                IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
 744        }
 745
 746        /* flush all queues disables */
 747        IXGBE_WRITE_FLUSH(hw);
 748        usleep_range(1000, 2000);
 749
 750        /*
  751         * Prevent the PCI-E bus from hanging by disabling PCI-E master
  752         * access and verifying there are no pending requests
 753         */
 754        return ixgbe_disable_pcie_master(hw);
 755}
 756
 757/**
 758 *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
 759 *  @hw: pointer to hardware structure
 760 *  @index: led number to turn on
 761 **/
 762s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
 763{
 764        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 765
 766        /* To turn on the LED, set mode to ON. */
 767        led_reg &= ~IXGBE_LED_MODE_MASK(index);
 768        led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
 769        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
 770        IXGBE_WRITE_FLUSH(hw);
 771
 772        return 0;
 773}
 774
 775/**
 776 *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
 777 *  @hw: pointer to hardware structure
 778 *  @index: led number to turn off
 779 **/
 780s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
 781{
 782        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
 783
 784        /* To turn off the LED, set mode to OFF. */
 785        led_reg &= ~IXGBE_LED_MODE_MASK(index);
 786        led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
 787        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
 788        IXGBE_WRITE_FLUSH(hw);
 789
 790        return 0;
 791}
 792
 793/**
 794 *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 795 *  @hw: pointer to hardware structure
 796 *
 797 *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
 798 *  ixgbe_hw struct in order to set up EEPROM access.
 799 **/
 800s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
 801{
 802        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
 803        u32 eec;
 804        u16 eeprom_size;
 805
 806        if (eeprom->type == ixgbe_eeprom_uninitialized) {
 807                eeprom->type = ixgbe_eeprom_none;
 808                /* Set default semaphore delay to 10ms which is a well
 809                 * tested value */
 810                eeprom->semaphore_delay = 10;
 811                /* Clear EEPROM page size, it will be initialized as needed */
 812                eeprom->word_page_size = 0;
 813
 814                /*
 815                 * Check for EEPROM present first.
 816                 * If not present leave as none
 817                 */
 818                eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
 819                if (eec & IXGBE_EEC_PRES) {
 820                        eeprom->type = ixgbe_eeprom_spi;
 821
 822                        /*
 823                         * SPI EEPROM is assumed here.  This code would need to
 824                         * change if a future EEPROM is not SPI.
 825                         */
 826                        eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
 827                                            IXGBE_EEC_SIZE_SHIFT);
 828                        eeprom->word_size = BIT(eeprom_size +
 829                                                 IXGBE_EEPROM_WORD_SIZE_SHIFT);
 830                }
 831
 832                if (eec & IXGBE_EEC_ADDR_SIZE)
 833                        eeprom->address_bits = 16;
 834                else
 835                        eeprom->address_bits = 8;
 836                hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
 837                       eeprom->type, eeprom->word_size, eeprom->address_bits);
 838        }
 839
 840        return 0;
 841}
 842
 843/**
 844 *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 845 *  @hw: pointer to hardware structure
 846 *  @offset: offset within the EEPROM to write
 847 *  @words: number of words
 848 *  @data: 16 bit word(s) to write to EEPROM
 849 *
  850 *  Writes 16 bit word(s) to the EEPROM through the bit-bang method
 851 **/
 852s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
 853                                               u16 words, u16 *data)
 854{
 855        s32 status;
 856        u16 i, count;
 857
 858        hw->eeprom.ops.init_params(hw);
 859
 860        if (words == 0)
 861                return IXGBE_ERR_INVALID_ARGUMENT;
 862
 863        if (offset + words > hw->eeprom.word_size)
 864                return IXGBE_ERR_EEPROM;
 865
 866        /*
  867         * The EEPROM page size cannot be queried from the chip. We do lazy
  868         * initialization; it is only worth doing when writing a large buffer.
 869         */
 870        if ((hw->eeprom.word_page_size == 0) &&
 871            (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
 872                ixgbe_detect_eeprom_page_size_generic(hw, offset);
 873
 874        /*
  875         * We cannot hold the synchronization semaphores for too long without
  876         * starving other entities. However, it is more efficient to write in
  877         * bursts than to synchronize access for each word.
 878         */
 879        for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
 880                count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
 881                         IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
 882                status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
 883                                                            count, &data[i]);
 884
 885                if (status != 0)
 886                        break;
 887        }
 888
 889        return status;
 890}
 891
 892/**
 893 *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 894 *  @hw: pointer to hardware structure
 895 *  @offset: offset within the EEPROM to be written to
 896 *  @words: number of word(s)
 897 *  @data: 16 bit word(s) to be written to the EEPROM
 898 *
 899 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 900 *  EEPROM will most likely contain an invalid checksum.
 901 **/
 902static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
 903                                              u16 words, u16 *data)
 904{
 905        s32 status;
 906        u16 word;
 907        u16 page_size;
 908        u16 i;
 909        u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
 910
 911        /* Prepare the EEPROM for writing  */
 912        status = ixgbe_acquire_eeprom(hw);
 913        if (status)
 914                return status;
 915
 916        if (ixgbe_ready_eeprom(hw) != 0) {
 917                ixgbe_release_eeprom(hw);
 918                return IXGBE_ERR_EEPROM;
 919        }
 920
 921        for (i = 0; i < words; i++) {
 922                ixgbe_standby_eeprom(hw);
 923
 924                /* Send the WRITE ENABLE command (8 bit opcode) */
 925                ixgbe_shift_out_eeprom_bits(hw,
 926                                            IXGBE_EEPROM_WREN_OPCODE_SPI,
 927                                            IXGBE_EEPROM_OPCODE_BITS);
 928
 929                ixgbe_standby_eeprom(hw);
 930
 931                /* Some SPI eeproms use the 8th address bit embedded
 932                 * in the opcode
 933                 */
 934                if ((hw->eeprom.address_bits == 8) &&
 935                    ((offset + i) >= 128))
 936                        write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
 937
 938                /* Send the Write command (8-bit opcode + addr) */
 939                ixgbe_shift_out_eeprom_bits(hw, write_opcode,
 940                                            IXGBE_EEPROM_OPCODE_BITS);
 941                ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
 942                                            hw->eeprom.address_bits);
 943
 944                page_size = hw->eeprom.word_page_size;
 945
 946                /* Send the data in burst via SPI */
 947                do {
 948                        word = data[i];
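                             /* swap bytes: the low byte is clocked out first */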
 949                        word = (word >> 8) | (word << 8);
 950                        ixgbe_shift_out_eeprom_bits(hw, word, 16);
 951
 952                        if (page_size == 0)
 953                                break;
 954
 955                        /* do not wrap around page */
 956                        if (((offset + i) & (page_size - 1)) ==
 957                            (page_size - 1))
 958                                break;
 959                } while (++i < words);
 960
 961                ixgbe_standby_eeprom(hw);
 962                usleep_range(10000, 20000);
 963        }
 964        /* Done with writing - release the EEPROM */
 965        ixgbe_release_eeprom(hw);
 966
 967        return 0;
 968}
 969
 970/**
 971 *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 972 *  @hw: pointer to hardware structure
 973 *  @offset: offset within the EEPROM to be written to
 974 *  @data: 16 bit word to be written to the EEPROM
 975 *
 976 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 977 *  EEPROM will most likely contain an invalid checksum.
 978 **/
 979s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
 980{
 981        hw->eeprom.ops.init_params(hw);
 982
 983        if (offset >= hw->eeprom.word_size)
 984                return IXGBE_ERR_EEPROM;
 985
 986        return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
 987}
 988
 989/**
 990 *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
 991 *  @hw: pointer to hardware structure
 992 *  @offset: offset within the EEPROM to be read
 993 *  @words: number of word(s)
  994 *  @data: read 16 bit word(s) from EEPROM
 995 *
 996 *  Reads 16 bit word(s) from EEPROM through bit-bang method
 997 **/
 998s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
 999                                              u16 words, u16 *data)
1000{
1001        s32 status;
1002        u16 i, count;
1003
1004        hw->eeprom.ops.init_params(hw);
1005
1006        if (words == 0)
1007                return IXGBE_ERR_INVALID_ARGUMENT;
1008
1009        if (offset + words > hw->eeprom.word_size)
1010                return IXGBE_ERR_EEPROM;
1011
1012        /*
 1013         * We cannot hold the synchronization semaphores for too long without
 1014         * starving other entities. However, it is more efficient to read in
 1015         * bursts than to synchronize access for each word.
1016         */
1017        for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1018                count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1019                         IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1020
1021                status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1022                                                           count, &data[i]);
1023
1024                if (status)
1025                        return status;
1026        }
1027
1028        return 0;
1029}
1030
1031/**
1032 *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1033 *  @hw: pointer to hardware structure
1034 *  @offset: offset within the EEPROM to be read
1035 *  @words: number of word(s)
1036 *  @data: read 16 bit word(s) from EEPROM
1037 *
1038 *  Reads 16 bit word(s) from EEPROM through bit-bang method
1039 **/
1040static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1041                                             u16 words, u16 *data)
1042{
1043        s32 status;
1044        u16 word_in;
1045        u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1046        u16 i;
1047
1048        /* Prepare the EEPROM for reading  */
1049        status = ixgbe_acquire_eeprom(hw);
1050        if (status)
1051                return status;
1052
1053        if (ixgbe_ready_eeprom(hw) != 0) {
1054                ixgbe_release_eeprom(hw);
1055                return IXGBE_ERR_EEPROM;
1056        }
1057
1058        for (i = 0; i < words; i++) {
1059                ixgbe_standby_eeprom(hw);
1060                /* Some SPI eeproms use the 8th address bit embedded
1061                 * in the opcode
1062                 */
1063                if ((hw->eeprom.address_bits == 8) &&
1064                    ((offset + i) >= 128))
1065                        read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1066
1067                /* Send the READ command (opcode + addr) */
1068                ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1069                                            IXGBE_EEPROM_OPCODE_BITS);
1070                ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1071                                            hw->eeprom.address_bits);
1072
1073                /* Read the data. */
1074                word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
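                     /* first byte in is the low byte; swap to host order */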
1075                data[i] = (word_in >> 8) | (word_in << 8);
1076        }
1077
1078        /* End this read operation */
1079        ixgbe_release_eeprom(hw);
1080
1081        return 0;
1082}
1083
1084/**
1085 *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1086 *  @hw: pointer to hardware structure
1087 *  @offset: offset within the EEPROM to be read
1088 *  @data: read 16 bit value from EEPROM
1089 *
1090 *  Reads 16 bit value from EEPROM through bit-bang method
1091 **/
1092s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1093                                       u16 *data)
1094{
1095        hw->eeprom.ops.init_params(hw);
1096
1097        if (offset >= hw->eeprom.word_size)
1098                return IXGBE_ERR_EEPROM;
1099
1100        return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1101}
1102
1103/**
1104 *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1105 *  @hw: pointer to hardware structure
1106 *  @offset: offset of word in the EEPROM to read
1107 *  @words: number of word(s)
1108 *  @data: 16 bit word(s) from the EEPROM
1109 *
 1110 *  Reads 16 bit word(s) from the EEPROM using the EERD register.
1111 **/
1112s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1113                                   u16 words, u16 *data)
1114{
1115        u32 eerd;
1116        s32 status;
1117        u32 i;
1118
1119        hw->eeprom.ops.init_params(hw);
1120
1121        if (words == 0)
1122                return IXGBE_ERR_INVALID_ARGUMENT;
1123
1124        if (offset >= hw->eeprom.word_size)
1125                return IXGBE_ERR_EEPROM;
1126
1127        for (i = 0; i < words; i++) {
1128                eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1129                       IXGBE_EEPROM_RW_REG_START;
1130
1131                IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1132                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1133
1134                if (status == 0) {
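                             /* EERD returns the read data in its upper 16 bits */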
1135                        data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1136                                   IXGBE_EEPROM_RW_REG_DATA);
1137                } else {
1138                        hw_dbg(hw, "Eeprom read timed out\n");
1139                        return status;
1140                }
1141        }
1142
1143        return 0;
1144}
1145
1146/**
1147 *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1148 *  @hw: pointer to hardware structure
1149 *  @offset: offset within the EEPROM to be used as a scratch pad
1150 *
1151 *  Discover EEPROM page size by writing marching data at given offset.
1152 *  This function is called only when we are writing a new large buffer
1153 *  at given offset so the data would be overwritten anyway.
1154 **/
1155static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1156                                                 u16 offset)
1157{
1158        u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1159        s32 status;
1160        u16 i;
1161
1162        for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1163                data[i] = i;
1164
1165        hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1166        status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1167                                             IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1168        hw->eeprom.word_page_size = 0;
1169        if (status)
1170                return status;
1171
1172        status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1173        if (status)
1174                return status;
1175
1176        /*
 1177         * When writing a burst larger than the actual page size, the EEPROM
 1178         * address wraps around the current page.
1179         */
1180        hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1181
1182        hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
1183               hw->eeprom.word_page_size);
1184        return 0;
1185}
1186
1187/**
1188 *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
1189 *  @hw: pointer to hardware structure
1190 *  @offset: offset of  word in the EEPROM to read
1191 *  @data: word read from the EEPROM
1192 *
1193 *  Reads a 16 bit word from the EEPROM using the EERD register.
1194 **/
1195s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1196{
1197        return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1198}
1199
1200/**
1201 *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1202 *  @hw: pointer to hardware structure
1203 *  @offset: offset of  word in the EEPROM to write
1204 *  @words: number of words
 1205 *  @data: word(s) to write to the EEPROM
 1206 *
 1207 *  Writes 16 bit word(s) to the EEPROM using the EEWR register.
1208 **/
1209s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1210                                    u16 words, u16 *data)
1211{
1212        u32 eewr;
1213        s32 status;
1214        u16 i;
1215
1216        hw->eeprom.ops.init_params(hw);
1217
1218        if (words == 0)
1219                return IXGBE_ERR_INVALID_ARGUMENT;
1220
1221        if (offset >= hw->eeprom.word_size)
1222                return IXGBE_ERR_EEPROM;
1223
1224        for (i = 0; i < words; i++) {
1225                eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1226                       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1227                       IXGBE_EEPROM_RW_REG_START;
1228
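                     /* make sure the previous EEWR operation has completed */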
1229                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1230                if (status) {
1231                        hw_dbg(hw, "Eeprom write EEWR timed out\n");
1232                        return status;
1233                }
1234
1235                IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1236
1237                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1238                if (status) {
1239                        hw_dbg(hw, "Eeprom write EEWR timed out\n");
1240                        return status;
1241                }
1242        }
1243
1244        return 0;
1245}
1246
1247/**
1248 *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1249 *  @hw: pointer to hardware structure
1250 *  @offset: offset of  word in the EEPROM to write
 1251 *  @data: word to write to the EEPROM
1252 *
1253 *  Write a 16 bit word to the EEPROM using the EEWR register.
1254 **/
1255s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1256{
1257        return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1258}
1259
1260/**
1261 *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1262 *  @hw: pointer to hardware structure
1263 *  @ee_reg: EEPROM flag for polling
1264 *
1265 *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1266 *  read or write is done respectively.
1267 **/
1268static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1269{
1270        u32 i;
1271        u32 reg;
1272
1273        for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1274                if (ee_reg == IXGBE_NVM_POLL_READ)
1275                        reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1276                else
1277                        reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1278
 1279                if (reg & IXGBE_EEPROM_RW_REG_DONE)
 1280                        return 0;
1282                udelay(5);
1283        }
1284        return IXGBE_ERR_EEPROM;
1285}
1286
1287/**
1288 *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1289 *  @hw: pointer to hardware structure
1290 *
1291 *  Prepares EEPROM for access using bit-bang method. This function should
1292 *  be called before issuing a command to the EEPROM.
1293 **/
1294static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1295{
1296        u32 eec;
1297        u32 i;
1298
1299        if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
1300                return IXGBE_ERR_SWFW_SYNC;
1301
1302        eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
1303
1304        /* Request EEPROM Access */
1305        eec |= IXGBE_EEC_REQ;
1306        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
1307
1308        for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1309                eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
1310                if (eec & IXGBE_EEC_GNT)
1311                        break;
1312                udelay(5);
1313        }
1314
1315        /* Release if grant not acquired */
1316        if (!(eec & IXGBE_EEC_GNT)) {
1317                eec &= ~IXGBE_EEC_REQ;
1318                IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
1319                hw_dbg(hw, "Could not acquire EEPROM grant\n");
1320
1321                hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1322                return IXGBE_ERR_EEPROM;
1323        }
1324
1325        /* Setup EEPROM for Read/Write */
1326        /* Clear CS and SK */
1327        eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1328        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
1329        IXGBE_WRITE_FLUSH(hw);
1330        udelay(1);
1331        return 0;
1332}
1333
1334/**
1335 *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
1336 *  @hw: pointer to hardware structure
1337 *
1338 *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1339 **/
1340static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1341{
1342        u32 timeout = 2000;
1343        u32 i;
1344        u32 swsm;
1345
1346        /* Get SMBI software semaphore between device drivers first */
1347        for (i = 0; i < timeout; i++) {
1348                /*
1349                 * If the SMBI bit is 0 when we read it, then the bit will be
1350                 * set and we have the semaphore
1351                 */
1352                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
1353                if (!(swsm & IXGBE_SWSM_SMBI))
1354                        break;
1355                usleep_range(50, 100);
1356        }
1357
1358        if (i == timeout) {
1359                hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
1360                /* this release is particularly important because our attempts
1361                 * above to get the semaphore may have succeeded, and if there
1362                 * was a timeout, we should unconditionally clear the semaphore
1363                 * bits to free the driver to make progress
1364                 */
1365                ixgbe_release_eeprom_semaphore(hw);
1366
1367                usleep_range(50, 100);
1368                /* one last try
1369                 * If the SMBI bit is 0 when we read it, then the bit will be
1370                 * set and we have the semaphore
1371                 */
1372                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
1373                if (swsm & IXGBE_SWSM_SMBI) {
1374                        hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
1375                        return IXGBE_ERR_EEPROM;
1376                }
1377        }
1378
1379        /* Now get the semaphore between SW/FW through the SWESMBI bit */
1380        for (i = 0; i < timeout; i++) {
1381                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
1382
1383                /* Set the SW EEPROM semaphore bit to request access */
1384                swsm |= IXGBE_SWSM_SWESMBI;
1385                IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
1386
1387                /* If we set the bit successfully then we got the
1388                 * semaphore.
1389                 */
1390                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
1391                if (swsm & IXGBE_SWSM_SWESMBI)
1392                        break;
1393
1394                usleep_range(50, 100);
1395        }
1396
1397        /* Release semaphores and return error if SW EEPROM semaphore
1398         * was not granted because we don't have access to the EEPROM
1399         */
1400        if (i >= timeout) {
1401                hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
1402                ixgbe_release_eeprom_semaphore(hw);
1403                return IXGBE_ERR_EEPROM;
1404        }
1405
1406        return 0;
1407}
1408
1409/**
1410 *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
1411 *  @hw: pointer to hardware structure
1412 *
1413 *  This function clears hardware semaphore bits.
1414 **/
1415static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1416{
1417        u32 swsm;
1418
1419        swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
1420
1421        /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1422        swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1423        IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);
1424        IXGBE_WRITE_FLUSH(hw);
1425}
1426
1427/**
1428 *  ixgbe_ready_eeprom - Polls for EEPROM ready
1429 *  @hw: pointer to hardware structure
1430 **/
1431static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1432{
1433        u16 i;
1434        u8 spi_stat_reg;
1435
1436        /*
1437         * Read "Status Register" repeatedly until the LSB is cleared.  The
1438         * EEPROM will signal that the command has been completed by clearing
1439         * bit 0 of the internal status register.  If it's not cleared within
1440         * 5 milliseconds, then error out.
1441         */
1442        for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1443                ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1444                                            IXGBE_EEPROM_OPCODE_BITS);
1445                spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1446                if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1447                        break;
1448
1449                udelay(5);
1450                ixgbe_standby_eeprom(hw);
1451        }
1452
1453        /*
1454         * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1455         * devices (and only 0-5mSec on 5V devices)
1456         */
1457        if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1458                hw_dbg(hw, "SPI EEPROM Status error\n");
1459                return IXGBE_ERR_EEPROM;
1460        }
1461
1462        return 0;
1463}
1464
1465/**
1466 *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1467 *  @hw: pointer to hardware structure
1468 **/
1469static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1470{
1471        u32 eec;
1472
1473        eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
1474
1475        /* Toggle CS to flush commands */
1476        eec |= IXGBE_EEC_CS;
1477        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
1478        IXGBE_WRITE_FLUSH(hw);
1479        udelay(1);
1480        eec &= ~IXGBE_EEC_CS;
1481        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
1482        IXGBE_WRITE_FLUSH(hw);
1483        udelay(1);
1484}
1485
1486/**
1487 *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1488 *  @hw: pointer to hardware structure
1489 *  @data: data to send to the EEPROM
1490 *  @count: number of bits to shift out
1491 **/
1492static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1493                                        u16 count)
1494{
1495        u32 eec;
1496        u32 mask;
1497        u32 i;
1498
1499        eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
1500
1501        /*
1502         * Mask is used to shift "count" bits of "data" out to the EEPROM
1503         * one bit at a time.  Determine the starting bit based on count
1504         */
1505        mask = BIT(count - 1);
1506
1507        for (i = 0; i < count; i++) {
1508                /*
1509                 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1510                 * "1", and then raising and then lowering the clock (the SK
1511                 * bit controls the clock input to the EEPROM).  A "0" is
1512                 * shifted out to the EEPROM by setting "DI" to "0" and then
1513                 * raising and then lowering the clock.
1514                 */
1515                if (data & mask)
1516                        eec |= IXGBE_EEC_DI;
1517                else
1518                        eec &= ~IXGBE_EEC_DI;
1519
1520                IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
1521                IXGBE_WRITE_FLUSH(hw);
1522
1523                udelay(1);
1524
1525                ixgbe_raise_eeprom_clk(hw, &eec);
1526                ixgbe_lower_eeprom_clk(hw, &eec);
1527
1528                /*
1529                 * Shift mask to signify next bit of data to shift in to the
1530                 * EEPROM
1531                 */
1532                mask = mask >> 1;
1533        }
1534
1535        /* We leave the "DI" bit set to "0" when we leave this routine. */
1536        eec &= ~IXGBE_EEC_DI;
1537        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
1538        IXGBE_WRITE_FLUSH(hw);
1539}
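
/* Worked example with hypothetical values: with count = 8 and data = 0xA5
 * (binary 10100101), mask starts at BIT(7) = 0x80 and the bits are clocked
 * out on DI MSB first as 1, 0, 1, 0, 0, 1, 0, 1, one SK pulse per bit,
 * with DI left low on exit.
 */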
1540
1541/**
1542 *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1543 *  @hw: pointer to hardware structure
 *  @count: number of bits to shift in
1544 **/
1545static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1546{
1547        u32 eec;
1548        u32 i;
1549        u16 data = 0;
1550
1551        /*
1552         * In order to read a register from the EEPROM, we need to shift
1553         * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1554         * the clock input to the EEPROM (setting the SK bit), and then reading
1555         * the value of the "DO" bit.  During this "shifting in" process the
1556         * "DI" bit should always be clear.
1557         */
1558        eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
1559
1560        eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1561
1562        for (i = 0; i < count; i++) {
1563                data = data << 1;
1564                ixgbe_raise_eeprom_clk(hw, &eec);
1565
1566                eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
1567
1568                eec &= ~(IXGBE_EEC_DI);
1569                if (eec & IXGBE_EEC_DO)
1570                        data |= 1;
1571
1572                ixgbe_lower_eeprom_clk(hw, &eec);
1573        }
1574
1575        return data;
1576}
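
/* Worked example with hypothetical values: shifting in count = 8 bits while
 * DO is observed as 1, 0, 1, 0, 0, 1, 0, 1 (MSB first) accumulates
 * data = 0xA5, since data is shifted left once per SK pulse and bit 0 is
 * set whenever DO reads high.
 */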
1577
1578/**
1579 *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1580 *  @hw: pointer to hardware structure
1581 *  @eec: EEC register's current value
1582 **/
1583static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1584{
1585        /*
1586         * Raise the clock input to the EEPROM
1587         * (setting the SK bit), then delay
1588         */
1589        *eec = *eec | IXGBE_EEC_SK;
1590        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
1591        IXGBE_WRITE_FLUSH(hw);
1592        udelay(1);
1593}
1594
1595/**
1596 *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
1597 *  @hw: pointer to hardware structure
1598 *  @eec: EEC register's current value
1599 **/
1600static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1601{
1602        /*
1603         * Lower the clock input to the EEPROM (clearing the SK bit), then
1604         * delay
1605         */
1606        *eec = *eec & ~IXGBE_EEC_SK;
1607        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec);
1608        IXGBE_WRITE_FLUSH(hw);
1609        udelay(1);
1610}
1611
1612/**
1613 *  ixgbe_release_eeprom - Release EEPROM, release semaphores
1614 *  @hw: pointer to hardware structure
1615 **/
1616static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1617{
1618        u32 eec;
1619
1620        eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
1621
1622        eec |= IXGBE_EEC_CS;  /* Pull CS high */
1623        eec &= ~IXGBE_EEC_SK; /* Lower SCK */
1624
1625        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
1626        IXGBE_WRITE_FLUSH(hw);
1627
1628        udelay(1);
1629
1630        /* Stop requesting EEPROM access */
1631        eec &= ~IXGBE_EEC_REQ;
1632        IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec);
1633
1634        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1635
1636        /*
1637         * Delay before attempt to obtain semaphore again to allow FW
1638         * access. semaphore_delay is in ms we need us for usleep_range
1639         */
1640        usleep_range(hw->eeprom.semaphore_delay * 1000,
1641                     hw->eeprom.semaphore_delay * 2000);
1642}
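
/* Example of the delay conversion above: with a hypothetical
 * hw->eeprom.semaphore_delay of 10 ms, the call becomes
 * usleep_range(10000, 20000), i.e. a 10-20 ms sleep expressed in
 * microseconds.
 */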
1643
1644/**
1645 *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
1646 *  @hw: pointer to hardware structure
1647 **/
1648s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1649{
1650        u16 i;
1651        u16 j;
1652        u16 checksum = 0;
1653        u16 length = 0;
1654        u16 pointer = 0;
1655        u16 word = 0;
1656
1657        /* Include 0x0-0x3F in the checksum */
1658        for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
1659                if (hw->eeprom.ops.read(hw, i, &word)) {
1660                        hw_dbg(hw, "EEPROM read failed\n");
1661                        break;
1662                }
1663                checksum += word;
1664        }
1665
1666        /* Include all data from pointers except for the fw pointer */
1667        for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
1668                if (hw->eeprom.ops.read(hw, i, &pointer)) {
1669                        hw_dbg(hw, "EEPROM read failed\n");
1670                        return IXGBE_ERR_EEPROM;
1671                }
1672
1673                /* If the pointer seems invalid */
1674                if (pointer == 0xFFFF || pointer == 0)
1675                        continue;
1676
1677                if (hw->eeprom.ops.read(hw, pointer, &length)) {
1678                        hw_dbg(hw, "EEPROM read failed\n");
1679                        return IXGBE_ERR_EEPROM;
1680                }
1681
1682                if (length == 0xFFFF || length == 0)
1683                        continue;
1684
1685                for (j = pointer + 1; j <= pointer + length; j++) {
1686                        if (hw->eeprom.ops.read(hw, j, &word)) {
1687                                hw_dbg(hw, "EEPROM read failed\n");
1688                                return IXGBE_ERR_EEPROM;
1689                        }
1690                        checksum += word;
1691                }
1692        }
1693
1694        checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1695
1696        return (s32)checksum;
1697}
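
/* Checksum arithmetic with a hypothetical running sum: if the covered
 * words sum to 0x1234 (mod 2^16), the value returned above is
 * IXGBE_EEPROM_SUM - 0x1234, so that adding the stored checksum word back
 * to the sum of the covered words yields IXGBE_EEPROM_SUM.
 */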
1698
1699/**
1700 *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
1701 *  @hw: pointer to hardware structure
1702 *  @checksum_val: calculated checksum
1703 *
1704 *  Performs checksum calculation and validates the EEPROM checksum.  If the
1705 *  caller does not need checksum_val, the value can be NULL.
1706 **/
1707s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1708                                           u16 *checksum_val)
1709{
1710        s32 status;
1711        u16 checksum;
1712        u16 read_checksum = 0;
1713
1714        /*
1715         * Read the first word from the EEPROM. If this times out or fails, do
1716         * not continue or we could be in for a very long wait while every
1717         * EEPROM read fails
1718         */
1719        status = hw->eeprom.ops.read(hw, 0, &checksum);
1720        if (status) {
1721                hw_dbg(hw, "EEPROM read failed\n");
1722                return status;
1723        }
1724
1725        status = hw->eeprom.ops.calc_checksum(hw);
1726        if (status < 0)
1727                return status;
1728
1729        checksum = (u16)(status & 0xffff);
1730
1731        status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1732        if (status) {
1733                hw_dbg(hw, "EEPROM read failed\n");
1734                return status;
1735        }
1736
1737        /* Verify read checksum from EEPROM is the same as
1738         * calculated checksum
1739         */
1740        if (read_checksum != checksum)
1741                status = IXGBE_ERR_EEPROM_CHECKSUM;
1742
1743        /* If the user cares, return the calculated checksum */
1744        if (checksum_val)
1745                *checksum_val = checksum;
1746
1747        return status;
1748}
1749
1750/**
1751 *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
1752 *  @hw: pointer to hardware structure
1753 **/
1754s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1755{
1756        s32 status;
1757        u16 checksum;
1758
1759        /*
1760         * Read the first word from the EEPROM. If this times out or fails, do
1761         * not continue or we could be in for a very long wait while every
1762         * EEPROM read fails
1763         */
1764        status = hw->eeprom.ops.read(hw, 0, &checksum);
1765        if (status) {
1766                hw_dbg(hw, "EEPROM read failed\n");
1767                return status;
1768        }
1769
1770        status = hw->eeprom.ops.calc_checksum(hw);
1771        if (status < 0)
1772                return status;
1773
1774        checksum = (u16)(status & 0xffff);
1775
1776        status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
1777
1778        return status;
1779}
1780
1781/**
1782 *  ixgbe_set_rar_generic - Set Rx address register
1783 *  @hw: pointer to hardware structure
1784 *  @index: Receive address register to write
1785 *  @addr: Address to put into receive address register
1786 *  @vmdq: VMDq "set" or "pool" index
1787 *  @enable_addr: set flag that address is active
1788 *
1789 *  Puts an ethernet address into a receive address register.
1790 **/
1791s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1792                          u32 enable_addr)
1793{
1794        u32 rar_low, rar_high;
1795        u32 rar_entries = hw->mac.num_rar_entries;
1796
1797        /* Make sure we are using a valid rar index range */
1798        if (index >= rar_entries) {
1799                hw_dbg(hw, "RAR index %d is out of range.\n", index);
1800                return IXGBE_ERR_INVALID_ARGUMENT;
1801        }
1802
1803        /* setup VMDq pool selection before this RAR gets enabled */
1804        hw->mac.ops.set_vmdq(hw, index, vmdq);
1805
1806        /*
1807         * HW expects these in little endian so we reverse the byte
1808         * order from network order (big endian) to little endian
1809         */
1810        rar_low = ((u32)addr[0] |
1811                   ((u32)addr[1] << 8) |
1812                   ((u32)addr[2] << 16) |
1813                   ((u32)addr[3] << 24));
1814        /*
1815         * Some parts put the VMDq setting in the extra RAH bits,
1816         * so save everything except the lower 16 bits that hold part
1817         * of the address and the address valid bit.
1818         */
1819        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1820        rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1821        rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
1822
1823        if (enable_addr != 0)
1824                rar_high |= IXGBE_RAH_AV;
1825
1826        IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1827        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1828
1829        return 0;
1830}
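
/* Byte-order example with a hypothetical MAC address 00:1b:21:3c:4d:5e:
 * rar_low  = 0x3c211b00 (addr[0] in bits 7:0 ... addr[3] in bits 31:24)
 * rar_high = addr[4] | addr[5] << 8 = 0x5e4d in its lower 16 bits,
 * with the upper RAH bits and IXGBE_RAH_AV handled as above.
 */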
1831
1832/**
1833 *  ixgbe_clear_rar_generic - Remove Rx address register
1834 *  @hw: pointer to hardware structure
1835 *  @index: Receive address register to write
1836 *
1837 *  Clears an ethernet address from a receive address register.
1838 **/
1839s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1840{
1841        u32 rar_high;
1842        u32 rar_entries = hw->mac.num_rar_entries;
1843
1844        /* Make sure we are using a valid rar index range */
1845        if (index >= rar_entries) {
1846                hw_dbg(hw, "RAR index %d is out of range.\n", index);
1847                return IXGBE_ERR_INVALID_ARGUMENT;
1848        }
1849
1850        /*
1851         * Some parts put the VMDq setting in the extra RAH bits,
1852         * so save everything except the lower 16 bits that hold part
1853         * of the address and the address valid bit.
1854         */
1855        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1856        rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1857
1858        IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1859        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1860
1861        /* clear VMDq pool/queue selection for this RAR */
1862        hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1863
1864        return 0;
1865}
1866
1867/**
1868 *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
1869 *  @hw: pointer to hardware structure
1870 *
1871 *  Places the MAC address in receive address register 0 and clears the rest
1872 *  of the receive address registers. Clears the multicast table. Assumes
1873 *  the receiver is in reset when the routine is called.
1874 **/
1875s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1876{
1877        u32 i;
1878        u32 rar_entries = hw->mac.num_rar_entries;
1879
1880        /*
1881         * If the current mac address is valid, assume it is a software override
1882         * to the permanent address.
1883         * Otherwise, use the permanent address from the eeprom.
1884         */
1885        if (!is_valid_ether_addr(hw->mac.addr)) {
1886                /* Get the MAC address from the RAR0 for later reference */
1887                hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1888
1889                hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
1890        } else {
1891                /* Setup the receive address. */
1892                hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
1893                hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
1894
1895                hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1896        }
1897
1898        /*  clear VMDq pool/queue selection for RAR 0 */
1899        hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
1900
1901        hw->addr_ctrl.overflow_promisc = 0;
1902
1903        hw->addr_ctrl.rar_used_count = 1;
1904
1905        /* Zero out the other receive addresses. */
1906        hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
1907        for (i = 1; i < rar_entries; i++) {
1908                IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1909                IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1910        }
1911
1912        /* Clear the MTA */
1913        hw->addr_ctrl.mta_in_use = 0;
1914        IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1915
1916        hw_dbg(hw, " Clearing MTA\n");
1917        for (i = 0; i < hw->mac.mcft_size; i++)
1918                IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1919
1920        if (hw->mac.ops.init_uta_tables)
1921                hw->mac.ops.init_uta_tables(hw);
1922
1923        return 0;
1924}
1925
1926/**
1927 *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
1928 *  @hw: pointer to hardware structure
1929 *  @mc_addr: the multicast address
1930 *
1931 *  Extracts 12 bits from a multicast address to determine which
1932 *  bit-vector to set in the multicast table. The hardware uses 12 bits of
1933 *  incoming rx multicast addresses to determine the bit-vector to check in
1934 *  the MTA. Which of the 4 possible 12-bit combinations the hardware uses is
1935 *  set by the MO field of MCSTCTRL. The MO field is set during initialization
1936 *  to mc_filter_type.
1937 **/
1938static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
1939{
1940        u32 vector = 0;
1941
1942        switch (hw->mac.mc_filter_type) {
1943        case 0:   /* use bits [47:36] of the address */
1944                vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
1945                break;
1946        case 1:   /* use bits [46:35] of the address */
1947                vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
1948                break;
1949        case 2:   /* use bits [45:34] of the address */
1950                vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
1951                break;
1952        case 3:   /* use bits [43:32] of the address */
1953                vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
1954                break;
1955        default:  /* Invalid mc_filter_type */
1956                hw_dbg(hw, "MC filter type param set incorrectly\n");
1957                break;
1958        }
1959
1960        /* vector can only be 12-bits or boundary will be exceeded */
1961        vector &= 0xFFF;
1962        return vector;
1963}
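
/* Worked example for mc_filter_type 0 with hypothetical address bytes
 * mc_addr[4] = 0xAB and mc_addr[5] = 0xCD:
 *   vector = (0xAB >> 4) | (0xCD << 4) = 0x00A | 0xCD0 = 0xCDA
 * which stays within the 12-bit limit enforced by the final mask.
 */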
1964
1965/**
1966 *  ixgbe_set_mta - Set bit-vector in multicast table
1967 *  @hw: pointer to hardware structure
1968 *  @mc_addr: Multicast address
1969 *
1970 *  Sets the bit-vector in the multicast table.
1971 **/
1972static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1973{
1974        u32 vector;
1975        u32 vector_bit;
1976        u32 vector_reg;
1977
1978        hw->addr_ctrl.mta_in_use++;
1979
1980        vector = ixgbe_mta_vector(hw, mc_addr);
1981        hw_dbg(hw, " bit-vector = 0x%03X\n", vector);
1982
1983        /*
1984         * The MTA is a register array of 128 32-bit registers. It is treated
1985         * like an array of 4096 bits.  We want to set bit
1986         * BitArray[vector_value]. So we figure out what register the bit is
1987         * in, read it, OR in the new bit, then write back the new value.  The
1988         * register is determined by the upper 7 bits of the vector value and
1989         * the bit within that register is determined by the lower 5 bits of
1990         * the value.
1991         */
1992        vector_reg = (vector >> 5) & 0x7F;
1993        vector_bit = vector & 0x1F;
1994        hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
1995}
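
/* Continuing the hypothetical vector 0xCDA (3290 decimal) from above:
 *   vector_reg = (0xCDA >> 5) & 0x7F = 102
 *   vector_bit = 0xCDA & 0x1F = 26
 * so bit 26 of mta_shadow[102] is set.
 */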
1996
1997/**
1998 *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
1999 *  @hw: pointer to hardware structure
2000 *  @netdev: pointer to net device structure
2001 *
2002 *  The given list replaces any existing list. Clears the multicast table
2003 *  shadow and hashes each address in the given list into the multicast
2004 *  table (MTA), then writes the table to hardware and enables multicast
2005 *  filtering if any addresses were added.
2006 **/
2007s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
2008                                      struct net_device *netdev)
2009{
2010        struct netdev_hw_addr *ha;
2011        u32 i;
2012
2013        /*
2014         * Set the new number of MC addresses that we are being requested to
2015         * use.
2016         */
2017        hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
2018        hw->addr_ctrl.mta_in_use = 0;
2019
2020        /* Clear mta_shadow */
2021        hw_dbg(hw, " Clearing MTA\n");
2022        memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2023
2024        /* Update mta shadow */
2025        netdev_for_each_mc_addr(ha, netdev) {
2026                hw_dbg(hw, " Adding the multicast addresses:\n");
2027                ixgbe_set_mta(hw, ha->addr);
2028        }
2029
2030        /* Enable mta */
2031        for (i = 0; i < hw->mac.mcft_size; i++)
2032                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2033                                      hw->mac.mta_shadow[i]);
2034
2035        if (hw->addr_ctrl.mta_in_use > 0)
2036                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2037                                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2038
2039        hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
2040        return 0;
2041}
2042
2043/**
2044 *  ixgbe_enable_mc_generic - Enable multicast address in RAR
2045 *  @hw: pointer to hardware structure
2046 *
2047 *  Enables multicast address in RAR and the use of the multicast hash table.
2048 **/
2049s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2050{
2051        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2052
2053        if (a->mta_in_use > 0)
2054                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2055                                hw->mac.mc_filter_type);
2056
2057        return 0;
2058}
2059
2060/**
2061 *  ixgbe_disable_mc_generic - Disable multicast address in RAR
2062 *  @hw: pointer to hardware structure
2063 *
2064 *  Disables multicast address in RAR and the use of the multicast hash table.
2065 **/
2066s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2067{
2068        struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2069
2070        if (a->mta_in_use > 0)
2071                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2072
2073        return 0;
2074}
2075
2076/**
2077 *  ixgbe_fc_enable_generic - Enable flow control
2078 *  @hw: pointer to hardware structure
2079 *
2080 *  Enable flow control according to the current settings.
2081 **/
2082s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2083{
2084        u32 mflcn_reg, fccfg_reg;
2085        u32 reg;
2086        u32 fcrtl, fcrth;
2087        int i;
2088
2089        /* Validate the water mark configuration. */
2090        if (!hw->fc.pause_time)
2091                return IXGBE_ERR_INVALID_LINK_SETTINGS;
2092
2093        /* Low water mark of zero causes XOFF floods */
2094        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
2095                if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2096                    hw->fc.high_water[i]) {
2097                        if (!hw->fc.low_water[i] ||
2098                            hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2099                                hw_dbg(hw, "Invalid water mark configuration\n");
2100                                return IXGBE_ERR_INVALID_LINK_SETTINGS;
2101                        }
2102                }
2103        }
2104
2105        /* Negotiate the fc mode to use */
2106        ixgbe_fc_autoneg(hw);
2107
2108        /* Disable any previous flow control settings */
2109        mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2110        mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2111
2112        fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2113        fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2114
2115        /*
2116         * The possible values of fc.current_mode are:
2117         * 0: Flow control is completely disabled
2118         * 1: Rx flow control is enabled (we can receive pause frames,
2119         *    but not send pause frames).
2120         * 2: Tx flow control is enabled (we can send pause frames but
2121         *    we do not support receiving pause frames).
2122         * 3: Both Rx and Tx flow control (symmetric) are enabled.
2123         * other: Invalid.
2124         */
2125        switch (hw->fc.current_mode) {
2126        case ixgbe_fc_none:
2127                /*
2128                 * Flow control is disabled by software override or autoneg.
2129                 * The code below will actually disable it in the HW.
2130                 */
2131                break;
2132        case ixgbe_fc_rx_pause:
2133                /*
2134                 * Rx Flow control is enabled and Tx Flow control is
2135                 * disabled by software override. Since there really
2136                 * isn't a way to advertise that we are capable of RX
2137                 * Pause ONLY, we will advertise that we support both
2138                 * symmetric and asymmetric Rx PAUSE.  Later, we will
2139                 * disable the adapter's ability to send PAUSE frames.
2140                 */
2141                mflcn_reg |= IXGBE_MFLCN_RFCE;
2142                break;
2143        case ixgbe_fc_tx_pause:
2144                /*
2145                 * Tx Flow control is enabled, and Rx Flow control is
2146                 * disabled by software override.
2147                 */
2148                fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2149                break;
2150        case ixgbe_fc_full:
2151                /* Flow control (both Rx and Tx) is enabled by SW override. */
2152                mflcn_reg |= IXGBE_MFLCN_RFCE;
2153                fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2154                break;
2155        default:
2156                hw_dbg(hw, "Flow control param set incorrectly\n");
2157                return IXGBE_ERR_CONFIG;
2158        }
2159
2160        /* Set 802.3x based flow control settings. */
2161        mflcn_reg |= IXGBE_MFLCN_DPF;
2162        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2163        IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2164
2165        /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2166        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
2167                if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2168                    hw->fc.high_water[i]) {
2169                        fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2170                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2171                        fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2172                } else {
2173                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2174                        /*
2175                         * In order to prevent Tx hangs when the internal Tx
2176                         * switch is enabled we must set the high water mark
2177                         * to the Rx packet buffer size - 24KB.  This allows
2178                         * the Tx switch to function even under heavy Rx
2179                         * workloads.
2180                         */
2181                        fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
2182                }
2183
2184                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2185        }
2186
2187        /* Configure pause time (2 TCs per register) */
2188        reg = hw->fc.pause_time * 0x00010001;
2189        for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
2190                IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2191
2192        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2193
2194        return 0;
2195}
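
/* Register packing example with a hypothetical pause_time of 0x0680:
 * each FCTTV register holds two traffic classes, so
 * reg = 0x0680 * 0x00010001 = 0x06800680 programs both halves, and the
 * refresh value written to FCRTV is 0x0680 / 2 = 0x0340.  The water marks
 * are shifted left by 10 (multiplied by 1024) before being written to
 * FCRTL/FCRTH.
 */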
2196
2197/**
2198 *  ixgbe_negotiate_fc - Negotiate flow control
2199 *  @hw: pointer to hardware structure
2200 *  @adv_reg: flow control advertised settings
2201 *  @lp_reg: link partner's flow control settings
2202 *  @adv_sym: symmetric pause bit in advertisement
2203 *  @adv_asm: asymmetric pause bit in advertisement
2204 *  @lp_sym: symmetric pause bit in link partner advertisement
2205 *  @lp_asm: asymmetric pause bit in link partner advertisement
2206 *
2207 *  Find the intersection between advertised settings and link partner's
2208 *  advertised settings
2209 **/
2210static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2211                              u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2212{
2213        if (!adv_reg || !lp_reg)
2214                return IXGBE_ERR_FC_NOT_NEGOTIATED;
2215
2216        if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2217                /*
2218                 * Now we need to check if the user selected Rx ONLY
2219                 * of pause frames.  In this case, we had to advertise
2220                 * FULL flow control because we could not advertise RX
2221                 * ONLY. Hence, we must now check to see if we need to
2222                 * turn OFF the TRANSMISSION of PAUSE frames.
2223                 */
2224                if (hw->fc.requested_mode == ixgbe_fc_full) {
2225                        hw->fc.current_mode = ixgbe_fc_full;
2226                        hw_dbg(hw, "Flow Control = FULL.\n");
2227                } else {
2228                        hw->fc.current_mode = ixgbe_fc_rx_pause;
2229                        hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
2230                }
2231        } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2232                   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2233                hw->fc.current_mode = ixgbe_fc_tx_pause;
2234                hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
2235        } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2236                   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2237                hw->fc.current_mode = ixgbe_fc_rx_pause;
2238                hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
2239        } else {
2240                hw->fc.current_mode = ixgbe_fc_none;
2241                hw_dbg(hw, "Flow Control = NONE.\n");
2242        }
2243        return 0;
2244}
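
/* Resolution example with hypothetical advertisements: if the local
 * device advertises both the symmetric and asymmetric pause bits while
 * the link partner advertises only the asymmetric bit, the third branch
 * above selects ixgbe_fc_rx_pause; if both sides advertise symmetric
 * pause and the requested mode is ixgbe_fc_full, the result is
 * ixgbe_fc_full.
 */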
2245
2246/**
2247 *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2248 *  @hw: pointer to hardware structure
2249 *
2250 *  Enable flow control according on 1 gig fiber.
2251 **/
2252static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2253{
2254        u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2255        s32 ret_val;
2256
2257        /*
2258         * On multispeed fiber at 1g, bail out if
2259         * - link is up but AN did not complete, or if
2260         * - link is up and AN completed but timed out
2261         */
2262
2263        linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2264        if (!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) ||
2265            (linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT))
2266                return IXGBE_ERR_FC_NOT_NEGOTIATED;
2267
2268        pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2269        pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2270
2271        ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2272                               pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2273                               IXGBE_PCS1GANA_ASM_PAUSE,
2274                               IXGBE_PCS1GANA_SYM_PAUSE,
2275                               IXGBE_PCS1GANA_ASM_PAUSE);
2276
2277        return ret_val;
2278}
2279
2280/**
2281 *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2282 *  @hw: pointer to hardware structure
2283 *
2284 *  Enable flow control according to IEEE clause 37.
2285 **/
2286static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2287{
2288        u32 links2, anlp1_reg, autoc_reg, links;
2289        s32 ret_val;
2290
2291        /*
2292         * On backplane, bail out if
2293         * - backplane autoneg was not completed, or if
2294         * - we are 82599 and link partner is not AN enabled
2295         */
2296        links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2297        if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2298                return IXGBE_ERR_FC_NOT_NEGOTIATED;
2299
2300        if (hw->mac.type == ixgbe_mac_82599EB) {
2301                links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2302                if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2303                        return IXGBE_ERR_FC_NOT_NEGOTIATED;
2304        }
2305        /*
2306         * Read the 10g AN autoc and LP ability registers and resolve
2307         * local flow control settings accordingly
2308         */
2309        autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2310        anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2311
2312        ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2313                anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2314                IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2315
2316        return ret_val;
2317}
2318
2319/**
2320 *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2321 *  @hw: pointer to hardware structure
2322 *
2323 *  Enable flow control according to IEEE clause 37.
2324 **/
2325static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2326{
2327        u16 technology_ability_reg = 0;
2328        u16 lp_technology_ability_reg = 0;
2329
2330        hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
2331                             MDIO_MMD_AN,
2332                             &technology_ability_reg);
2333        hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
2334                             MDIO_MMD_AN,
2335                             &lp_technology_ability_reg);
2336
2337        return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2338                                  (u32)lp_technology_ability_reg,
2339                                  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2340                                  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2341}
2342
2343/**
2344 *  ixgbe_fc_autoneg - Configure flow control
2345 *  @hw: pointer to hardware structure
2346 *
2347 *  Compares our advertised flow control capabilities to those advertised by
2348 *  our link partner, and determines the proper flow control mode to use.
2349 **/
2350void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2351{
2352        s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2353        ixgbe_link_speed speed;
2354        bool link_up;
2355
2356        /*
2357         * AN should have completed when the cable was plugged in.
2358         * Look for reasons to bail out.  Bail out if:
2359         * - FC autoneg is disabled, or if
2360         * - link is not up.
2361         *
2362         * Since we're being called from an LSC, link is already known to be up.
2363         * So use link_up_wait_to_complete=false.
2364         */
2365        if (hw->fc.disable_fc_autoneg)
2366                goto out;
2367
2368        hw->mac.ops.check_link(hw, &speed, &link_up, false);
2369        if (!link_up)
2370                goto out;
2371
2372        switch (hw->phy.media_type) {
2373        /* Autoneg flow control on fiber adapters */
2374        case ixgbe_media_type_fiber:
2375                if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2376                        ret_val = ixgbe_fc_autoneg_fiber(hw);
2377                break;
2378
2379        /* Autoneg flow control on backplane adapters */
2380        case ixgbe_media_type_backplane:
2381                ret_val = ixgbe_fc_autoneg_backplane(hw);
2382                break;
2383
2384        /* Autoneg flow control on copper adapters */
2385        case ixgbe_media_type_copper:
2386                if (ixgbe_device_supports_autoneg_fc(hw))
2387                        ret_val = ixgbe_fc_autoneg_copper(hw);
2388                break;
2389
2390        default:
2391                break;
2392        }
2393
2394out:
2395        if (ret_val == 0) {
2396                hw->fc.fc_was_autonegged = true;
2397        } else {
2398                hw->fc.fc_was_autonegged = false;
2399                hw->fc.current_mode = hw->fc.requested_mode;
2400        }
2401}
2402
2403/**
2404 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
2405 * @hw: pointer to hardware structure
2406 *
2407 * System-wide timeout range is encoded in PCIe Device Control2 register.
2408 *
2409 *  Add 10% to specified maximum and return the number of times to poll for
2410 *  completion timeout, in units of 100 microsec.  Never return less than
2411 *  800 = 80 millisec.
2412 **/
2413static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2414{
2415        u16 devctl2;
2416        u32 pollcnt;
2417
2418        devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
2419        devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2420
2421        switch (devctl2) {
2422        case IXGBE_PCIDEVCTRL2_65_130ms:
2423                pollcnt = 1300;         /* 130 millisec */
2424                break;
2425        case IXGBE_PCIDEVCTRL2_260_520ms:
2426                pollcnt = 5200;         /* 520 millisec */
2427                break;
2428        case IXGBE_PCIDEVCTRL2_1_2s:
2429                pollcnt = 20000;        /* 2 sec */
2430                break;
2431        case IXGBE_PCIDEVCTRL2_4_8s:
2432                pollcnt = 80000;        /* 8 sec */
2433                break;
2434        case IXGBE_PCIDEVCTRL2_17_34s:
2435                pollcnt = 340000;       /* 34 sec */
2436                break;
2437        case IXGBE_PCIDEVCTRL2_50_100us:        /* 100 microsecs */
2438        case IXGBE_PCIDEVCTRL2_1_2ms:           /* 2 millisecs */
2439        case IXGBE_PCIDEVCTRL2_16_32ms:         /* 32 millisec */
2440        case IXGBE_PCIDEVCTRL2_16_32ms_def:     /* 32 millisec default */
2441        default:
2442                pollcnt = 800;          /* 80 millisec minimum */
2443                break;
2444        }
2445
2446        /* add 10% to spec maximum */
2447        return (pollcnt * 11) / 10;
2448}
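
/* Example: a hypothetical device reporting the 260-520 ms completion
 * timeout range yields pollcnt = 5200 polls of 100 us (520 ms), and the
 * returned value is 5200 * 11 / 10 = 5720 polls, i.e. roughly 572 ms.
 */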
2449
2450/**
2451 *  ixgbe_disable_pcie_master - Disable PCI-express master access
2452 *  @hw: pointer to hardware structure
2453 *
2454 *  Disables PCI-Express master access and verifies there are no pending
2455 *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
2456 *  bit hasn't caused the master requests to be disabled, else 0
2457 *  is returned signifying master requests disabled.
2458 **/
2459static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2460{
2461        u32 i, poll;
2462        u16 value;
2463
2464        /* Always set this bit to ensure any future transactions are blocked */
2465        IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2466
2467        /* Poll for bit to read as set */
2468        for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2469                if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS)
2470                        break;
2471                usleep_range(100, 120);
2472        }
2473        if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) {
2474                hw_dbg(hw, "GIO disable did not set - requesting resets\n");
2475                goto gio_disable_fail;
2476        }
2477
2478        /* Exit if master requests are blocked */
2479        if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
2480            ixgbe_removed(hw->hw_addr))
2481                return 0;
2482
2483        /* Poll for master request bit to clear */
2484        for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
2485                udelay(100);
2486                if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
2487                        return 0;
2488        }
2489
2490        /*
2491         * Two consecutive resets are required via CTRL.RST per datasheet
2492         * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
2493         * of this need.  The first reset prevents new master requests from
2494         * being issued by our device.  We then must wait 1usec or more for any
2495         * remaining completions from the PCIe bus to trickle in, and then reset
2496         * again to clear out any effects they may have had on our device.
2497         */
2498        hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
2499gio_disable_fail:
2500        hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
2501
2502        if (hw->mac.type >= ixgbe_mac_X550)
2503                return 0;
2504
2505        /*
2506         * Before proceeding, make sure that the PCIe block does not have
2507         * transactions pending.
2508         */
2509        poll = ixgbe_pcie_timeout_poll(hw);
2510        for (i = 0; i < poll; i++) {
2511                udelay(100);
2512                value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
2513                if (ixgbe_removed(hw->hw_addr))
2514                        return 0;
2515                if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2516                        return 0;
2517        }
2518
2519        hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
2520        return IXGBE_ERR_MASTER_REQUESTS_PENDING;
2521}
2522
2523/**
2524 *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
2525 *  @hw: pointer to hardware structure
2526 *  @mask: Mask to specify which semaphore to acquire
2527 *
2528 *  Acquires the SWFW semaphore through the GSSR register for the specified
2529 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2530 **/
2531s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
2532{
2533        u32 gssr = 0;
2534        u32 swmask = mask;
2535        u32 fwmask = mask << 5;
2536        u32 timeout = 200;
2537        u32 i;
2538
2539        for (i = 0; i < timeout; i++) {
2540                /*
2541                 * SW NVM semaphore bit is used for access to all
2542                 * SW_FW_SYNC bits (not just NVM)
2543                 */
2544                if (ixgbe_get_eeprom_semaphore(hw))
2545                        return IXGBE_ERR_SWFW_SYNC;
2546
2547                gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2548                if (!(gssr & (fwmask | swmask))) {
2549                        gssr |= swmask;
2550                        IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2551                        ixgbe_release_eeprom_semaphore(hw);
2552                        return 0;
2553                } else {
2554                        /* Resource is currently in use by FW or SW */
2555                        ixgbe_release_eeprom_semaphore(hw);
2556                        usleep_range(5000, 10000);
2557                }
2558        }
2559
2560        /* If time expired clear the bits holding the lock and retry */
2561        if (gssr & (fwmask | swmask))
2562                ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
2563
2564        usleep_range(5000, 10000);
2565        return IXGBE_ERR_SWFW_SYNC;
2566}
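
/* GSSR bit layout example with a hypothetical mask of 0x0002: the SW
 * request bit is 0x0002 and the corresponding FW bit is
 * 0x0002 << 5 = 0x0040; acquisition only succeeds while neither bit is
 * already set in GSSR.
 */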
2567
2568/**
2569 *  ixgbe_release_swfw_sync - Release SWFW semaphore
2570 *  @hw: pointer to hardware structure
2571 *  @mask: Mask to specify which semaphore to release
2572 *
2573 *  Releases the SWFW semaphore through the GSSR register for the specified
2574 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
2575 **/
2576void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
2577{
2578        u32 gssr;
2579        u32 swmask = mask;
2580
2581        ixgbe_get_eeprom_semaphore(hw);
2582
2583        gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
2584        gssr &= ~swmask;
2585        IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
2586
2587        ixgbe_release_eeprom_semaphore(hw);
2588}
2589
2590/**
2591 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
2592 * @hw: pointer to hardware structure
2593 * @reg_val: Value we read from AUTOC
2594 * @locked: bool to indicate whether the SW/FW lock should be taken.  Never
2595 *          true in the generic case.
2596 *
2597 * The default case requires no protection so just do the register read.
2598 **/
2599s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
2600{
2601        *locked = false;
2602        *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2603        return 0;
2604}
2605
2606/**
2607 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
2608 * @hw: pointer to hardware structure
2609 * @reg_val: value to write to AUTOC
2610 * @locked: bool to indicate whether the SW/FW lock was already taken by
2611 *          previous read.
2612 **/
2613s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
2614{
2615        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
2616        return 0;
2617}
2618
2619/**
2620 *  ixgbe_disable_rx_buff_generic - Stops the receive data path
2621 *  @hw: pointer to hardware structure
2622 *
2623 *  Stops the receive data path and waits for the HW to internally
2624 *  empty the Rx security block.
2625 **/
2626s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
2627{
2628#define IXGBE_MAX_SECRX_POLL 40
2629        int i;
2630        int secrxreg;
2631
2632        secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2633        secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
2634        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2635        for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2636                secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2637                if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2638                        break;
2639                else
2640                        /* Use interrupt-safe sleep just in case */
2641                        udelay(1000);
2642        }
2643
2644        /* For informational purposes only */
2645        if (i >= IXGBE_MAX_SECRX_POLL)
2646                hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
2647
2648        return 0;
2650}
2651
2652/**
2653 *  ixgbe_enable_rx_buff_generic - Enables the receive data path
2654 *  @hw: pointer to hardware structure
2655 *
2656 *  Enables the receive data path
2657 **/
2658s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
2659{
2660        int secrxreg;
2661
2662        secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
2663        secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
2664        IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2665        IXGBE_WRITE_FLUSH(hw);
2666
2667        return 0;
2668}
2669
2670/**
2671 *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
2672 *  @hw: pointer to hardware structure
2673 *  @regval: register value to write to RXCTRL
2674 *
2675 *  Enables the Rx DMA unit
2676 **/
2677s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
2678{
2679        if (regval & IXGBE_RXCTRL_RXEN)
2680                hw->mac.ops.enable_rx(hw);
2681        else
2682                hw->mac.ops.disable_rx(hw);
2683
2684        return 0;
2685}
2686
2687/**
2688 *  ixgbe_blink_led_start_generic - Blink LED based on index.
2689 *  @hw: pointer to hardware structure
2690 *  @index: led number to blink
2691 **/
2692s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2693{
2694        ixgbe_link_speed speed = 0;
2695        bool link_up = false;
2696        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2697        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2698        bool locked = false;
2699        s32 ret_val;
2700
2701        /*
2702         * Link must be up to auto-blink the LEDs;
2703         * Force it if link is down.
2704         */
2705        hw->mac.ops.check_link(hw, &speed, &link_up, false);
2706
2707        if (!link_up) {
2708                ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2709                if (ret_val)
2710                        return ret_val;
2711
2712                autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2713                autoc_reg |= IXGBE_AUTOC_FLU;
2714
2715                ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2716                if (ret_val)
2717                        return ret_val;
2718
2719                IXGBE_WRITE_FLUSH(hw);
2720
2721                usleep_range(10000, 20000);
2722        }
2723
2724        led_reg &= ~IXGBE_LED_MODE_MASK(index);
2725        led_reg |= IXGBE_LED_BLINK(index);
2726        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2727        IXGBE_WRITE_FLUSH(hw);
2728
2729        return 0;
2730}
2731
2732/**
2733 *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
2734 *  @hw: pointer to hardware structure
2735 *  @index: led number to stop blinking
2736 **/
2737s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2738{
2739        u32 autoc_reg = 0;
2740        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2741        bool locked = false;
2742        s32 ret_val;
2743
2744        ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2745        if (ret_val)
2746                return ret_val;
2747
2748        autoc_reg &= ~IXGBE_AUTOC_FLU;
2749        autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2750
2751        ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2752        if (ret_val)
2753                return ret_val;
2754
2755        led_reg &= ~IXGBE_LED_MODE_MASK(index);
2756        led_reg &= ~IXGBE_LED_BLINK(index);
2757        led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2758        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2759        IXGBE_WRITE_FLUSH(hw);
2760
2761        return 0;
2762}
2763
2764/**
2765 *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
2766 *  @hw: pointer to hardware structure
2767 *  @san_mac_offset: SAN MAC address offset
2768 *
2769 *  This function will read the EEPROM location for the SAN MAC address
2770 *  pointer, and returns the value at that location.  This is used in both
2771 *  get and set mac_addr routines.
2772 **/
2773static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
2774                                        u16 *san_mac_offset)
2775{
2776        s32 ret_val;
2777
2778        /*
2779         * First read the EEPROM pointer to see if the MAC addresses are
2780         * available.
2781         */
2782        ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
2783                                      san_mac_offset);
2784        if (ret_val)
2785                hw_err(hw, "eeprom read at offset %d failed\n",
2786                       IXGBE_SAN_MAC_ADDR_PTR);
2787
2788        return ret_val;
2789}
2790
2791/**
2792 *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
2793 *  @hw: pointer to hardware structure
2794 *  @san_mac_addr: SAN MAC address
2795 *
2796 *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
2797 *  per-port, so set_lan_id() must be called before reading the addresses.
2798 *  set_lan_id() is called by identify_sfp(), but this cannot be relied
2799 *  upon for non-SFP connections, so we must call it here.
2800 **/
2801s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2802{
2803        u16 san_mac_data, san_mac_offset;
2804        u8 i;
2805        s32 ret_val;
2806
2807        /*
2808         * First read the EEPROM pointer to see if the MAC addresses are
2809         * available.  If they're not, no point in calling set_lan_id() here.
2810         */
2811        ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2812        if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
2814                goto san_mac_addr_clr;
2815
2816        /* make sure we know which port we need to program */
2817        hw->mac.ops.set_lan_id(hw);
2818        /* apply the port offset to the address offset */
2819        san_mac_offset += hw->bus.func ? IXGBE_SAN_MAC_ADDR_PORT1_OFFSET :
2820                                         IXGBE_SAN_MAC_ADDR_PORT0_OFFSET;
2821        for (i = 0; i < 3; i++) {
2822                ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
2823                                              &san_mac_data);
2824                if (ret_val) {
2825                        hw_err(hw, "eeprom read at offset %d failed\n",
2826                               san_mac_offset);
2827                        goto san_mac_addr_clr;
2828                }
2829                san_mac_addr[i * 2] = (u8)(san_mac_data);
2830                san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2831                san_mac_offset++;
2832        }
2833        return 0;
2834
2835san_mac_addr_clr:
2836        /* No addresses available in this EEPROM.  It's not necessarily an
2837         * error though, so just wipe the local address and return.
2838         */
2839        for (i = 0; i < 6; i++)
2840                san_mac_addr[i] = 0xFF;
2841        return ret_val;
2842}
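
/* Unpacking example with a hypothetical EEPROM word of 0x2211: the low
 * byte 0x11 is stored first (san_mac_addr[i * 2]) and the high byte 0x22
 * second (san_mac_addr[i * 2 + 1]), so three words yield the six address
 * bytes.
 */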
2843
2844/**
2845 *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2846 *  @hw: pointer to hardware structure
2847 *
2848 *  Read PCIe configuration space, and get the MSI-X vector count from
2849 *  the capabilities table.
2850 **/
2851u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2852{
2853        u16 msix_count;
2854        u16 max_msix_count;
2855        u16 pcie_offset;
2856
2857        switch (hw->mac.type) {
2858        case ixgbe_mac_82598EB:
2859                pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
2860                max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
2861                break;
2862        case ixgbe_mac_82599EB:
2863        case ixgbe_mac_X540:
2864        case ixgbe_mac_X550:
2865        case ixgbe_mac_X550EM_x:
2866        case ixgbe_mac_x550em_a:
2867                pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
2868                max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2869                break;
2870        default:
2871                return 1;
2872        }
2873
2874        msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
2875        if (ixgbe_removed(hw->hw_addr))
2876                msix_count = 0;
2877        msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2878
2879        /* MSI-X count is zero-based in HW */
2880        msix_count++;
2881
2882        if (msix_count > max_msix_count)
2883                msix_count = max_msix_count;
2884
2885        return msix_count;
2886}
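
/* Example with a hypothetical MSI-X capability word: a table-size field
 * of 63 after masking means 64 vectors once the zero-based count is
 * incremented, which is then clamped to max_msix_count for the MAC type.
 */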
2887
2888/**
2889 *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
2890 *  @hw: pointer to hardware struct
2891 *  @rar: receive address register index to disassociate
2892 *  @vmdq: VMDq pool index to remove from the rar
2893 **/
2894s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2895{
2896        u32 mpsar_lo, mpsar_hi;
2897        u32 rar_entries = hw->mac.num_rar_entries;
2898
2899        /* Make sure we are using a valid rar index range */
2900        if (rar >= rar_entries) {
2901                hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2902                return IXGBE_ERR_INVALID_ARGUMENT;
2903        }
2904
2905        mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2906        mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2907
2908        if (ixgbe_removed(hw->hw_addr))
2909                return 0;
2910
2911        if (!mpsar_lo && !mpsar_hi)
2912                return 0;
2913
2914        if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2915                if (mpsar_lo) {
2916                        IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2917                        mpsar_lo = 0;
2918                }
2919                if (mpsar_hi) {
2920                        IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2921                        mpsar_hi = 0;
2922                }
2923        } else if (vmdq < 32) {
2924                mpsar_lo &= ~BIT(vmdq);
2925                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2926        } else {
2927                mpsar_hi &= ~BIT(vmdq - 32);
2928                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2929        }
2930
2931        /* was that the last pool using this rar? */
2932        if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2933                hw->mac.ops.clear_rar(hw, rar);
2934        return 0;
2935}
2936
2937/**
2938 *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
2939 *  @hw: pointer to hardware struct
2940 *  @rar: receive address register index to associate with a VMDq index
2941 *  @vmdq: VMDq pool index
2942 **/
2943s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2944{
2945        u32 mpsar;
2946        u32 rar_entries = hw->mac.num_rar_entries;
2947
2948        /* Make sure we are using a valid rar index range */
2949        if (rar >= rar_entries) {
2950                hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2951                return IXGBE_ERR_INVALID_ARGUMENT;
2952        }
2953
2954        if (vmdq < 32) {
2955                mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2956                mpsar |= BIT(vmdq);
2957                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2958        } else {
2959                mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2960                mpsar |= BIT(vmdq - 32);
2961                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2962        }
2963        return 0;
2964}
2965
2966/**
2967 *  ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with a rx address
2968 *  @hw: pointer to hardware struct
2969 *  @vmdq: VMDq pool index
2970 *
2971 *  This function should only be used in IOV mode.  In IOV mode the default
2972 *  pool is the next pool after the number of VFs advertised, not pool 0.
2973 *  The MPSAR table needs to be updated for the SAN_MAC RAR
2974 *  [hw->mac.san_mac_rar_index].
2975 **/
2976s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
2977{
2978        u32 rar = hw->mac.san_mac_rar_index;
2979
2980        if (vmdq < 32) {
2981                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
2982                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2983        } else {
2984                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2985                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
2986        }
2987
2988        return 0;
2989}
2990
2991/**
2992 *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
2993 *  @hw: pointer to hardware structure
2994 **/
2995s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2996{
2997        int i;
2998
2999        for (i = 0; i < 128; i++)
3000                IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3001
3002        return 0;
3003}
3004
3005/**
3006 *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3007 *  @hw: pointer to hardware structure
3008 *  @vlan: VLAN id to write to VLAN filter
3009 *  @vlvf_bypass: true to find the VLAN id without using an empty slot
3010 *
3011 *  Return the VLVF index for this VLAN id, else IXGBE_ERR_NO_SPACE
3012 **/
3013static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3014{
3015        s32 regindex, first_empty_slot;
3016        u32 bits;
3017
3018        /* short cut the special case */
3019        if (vlan == 0)
3020                return 0;
3021
3022        /* if vlvf_bypass is set we don't want to use an empty slot; we
3023         * will simply bypass the VLVF if there are no entries present in
3024         * the VLVF that contain our VLAN
3025         */
3026        first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3027
3028        /* add VLAN enable bit for comparison */
3029        vlan |= IXGBE_VLVF_VIEN;
3030
3031        /* Search for the vlan id in the VLVF entries. Save off the first empty
3032         * slot found along the way.
3033         *
3034         * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3035         */
3036        for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3037                bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3038                if (bits == vlan)
3039                        return regindex;
3040                if (!first_empty_slot && !bits)
3041                        first_empty_slot = regindex;
3042        }
3043
3044        /* If we are here then we didn't find the VLAN.  Return first empty
3045         * slot we found during our search, else error.
3046         */
3047        if (!first_empty_slot)
3048                hw_dbg(hw, "No space in VLVF.\n");
3049
3050        return first_empty_slot ? : IXGBE_ERR_NO_SPACE;
3051}
3052
3053/**
3054 *  ixgbe_set_vfta_generic - Set VLAN filter table
3055 *  @hw: pointer to hardware structure
3056 *  @vlan: VLAN id to write to VLAN filter
3057 *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
3058 *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
3059 *  @vlvf_bypass: boolean flag indicating updating default pool is okay
3060 *
3061 *  Turn on/off specified VLAN in the VLAN filter table.
3062 **/
3063s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3064                           bool vlan_on, bool vlvf_bypass)
3065{
3066        u32 regidx, vfta_delta, vfta, bits;
3067        s32 vlvf_index;
3068
3069        if ((vlan > 4095) || (vind > 63))
3070                return IXGBE_ERR_PARAM;
3071
3072        /*
3073         * this is a 2 part operation - first the VFTA, then the
3074         * VLVF and VLVFB if VT Mode is set
3075         * We don't write the VFTA until we know the VLVF part succeeded.
3076         */
3077
3078        /* Part 1
3079         * The VFTA is a bitstring made up of 128 32-bit registers
3080         * that enable the particular VLAN id, much like the MTA:
3081         *    bits[11-5]: which register
3082         *    bits[4-0]:  which bit in the register
3083         */
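        /* Worked example (illustrative): VLAN id 100 (0x064) has
         * bits[11:5] = 3 and bits[4:0] = 4, so it selects VFTA[3] bit 4,
         * i.e. regidx = 100 / 32 = 3 and vfta_delta = BIT(100 % 32) = BIT(4).
         */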
3084        regidx = vlan / 32;
3085        vfta_delta = BIT(vlan % 32);
3086        vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3087
3088        /* vfta_delta represents the difference between the current value
3089         * of vfta and the value we want in the register.  Since the diff
3090         * is an XOR mask we can just update vfta using an XOR.
3091         */
3092        vfta_delta &= vlan_on ? ~vfta : vfta;
3093        vfta ^= vfta_delta;
3094
3095        /* Part 2
3096         * If VT Mode is set
3097         *   Either vlan_on
3098         *     make sure the vlan is in VLVF
3099         *     set the vind bit in the matching VLVFB
3100         *   Or !vlan_on
3101         *     clear the pool bit and possibly the vind
3102         */
3103        if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
3104                goto vfta_update;
3105
3106        vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
3107        if (vlvf_index < 0) {
3108                if (vlvf_bypass)
3109                        goto vfta_update;
3110                return vlvf_index;
3111        }
3112
3113        bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
3114
3115        /* set the pool bit */
3116        bits |= BIT(vind % 32);
3117        if (vlan_on)
3118                goto vlvf_update;
3119
3120        /* clear the pool bit */
3121        bits ^= BIT(vind % 32);
3122
3123        if (!bits &&
3124            !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
3125                /* Clear VFTA first, then disable VLVF.  Otherwise
3126                 * we run the risk of stray packets leaking into
3127                 * the PF via the default pool
3128                 */
3129                if (vfta_delta)
3130                        IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
3131
3132                /* disable VLVF and clear remaining bit from pool */
3133                IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3134                IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
3135
3136                return 0;
3137        }
3138
3139        /* If there are still bits set in the VLVFB registers
3140         * for the VLAN ID indicated we need to see if the
3141         * caller is requesting that we clear the VFTA entry bit.
3142         * If the caller has requested that we clear the VFTA
3143         * entry bit but there are still pools/VFs using this VLAN
3144         * ID entry then ignore the request.  We're not worried
3145         * about the case where we're turning the VFTA VLAN ID
3146         * entry bit on, only when requested to turn it off as
3147         * there may be multiple pools and/or VFs using the
3148         * VLAN ID entry.  In that case we cannot clear the
3149         * VFTA bit until all pools/VFs using that VLAN ID have also
3150         * been cleared.  This will be indicated by "bits" being
3151         * zero.
3152         */
3153        vfta_delta = 0;
3154
3155vlvf_update:
3156        /* record pool change and enable VLAN ID if not already enabled */
3157        IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
3158        IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
3159
3160vfta_update:
3161        /* Update VFTA now that we are ready for traffic */
3162        if (vfta_delta)
3163                IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
3164
3165        return 0;
3166}
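
/* Hypothetical usage sketch (not part of the original source): to let
 * VMDq/VF pool 5 receive VLAN 100 tagged traffic and later remove it:
 *
 *	s32 err;
 *
 *	err = ixgbe_set_vfta_generic(hw, 100, 5, true, false);
 *	...
 *	err = ixgbe_set_vfta_generic(hw, 100, 5, false, false);
 *
 * Note that the VFTA bit for VLAN 100 is only cleared once no pool still
 * references the VLVF entry for that VLAN.
 */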
3167
3168/**
3169 *  ixgbe_clear_vfta_generic - Clear VLAN filter table
3170 *  @hw: pointer to hardware structure
3171 *
3172 *  Clears the VLAN filter table, and the VMDq index associated with the filter
3173 **/
3174s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3175{
3176        u32 offset;
3177
3178        for (offset = 0; offset < hw->mac.vft_size; offset++)
3179                IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3180
3181        for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3182                IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3183                IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3184                IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
3185        }
3186
3187        return 0;
3188}
3189
3190/**
3191 *  ixgbe_check_mac_link_generic - Determine link and speed status
3192 *  @hw: pointer to hardware structure
3193 *  @speed: pointer to link speed
3194 *  @link_up: true when link is up
3195 *  @link_up_wait_to_complete: bool used to wait for link up or not
3196 *
3197 *  Reads the links register to determine if link is up and the current speed
3198 **/
3199s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3200                                 bool *link_up, bool link_up_wait_to_complete)
3201{
3202        u32 links_reg, links_orig;
3203        u32 i;
3204
3205        /* clear the old state */
3206        links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3207
3208        links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3209
3210        if (links_orig != links_reg) {
3211                hw_dbg(hw, "LINKS changed from %08X to %08X\n",
3212                       links_orig, links_reg);
3213        }
3214
3215        if (link_up_wait_to_complete) {
3216                for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3217                        if (links_reg & IXGBE_LINKS_UP) {
3218                                *link_up = true;
3219                                break;
3220                        } else {
3221                                *link_up = false;
3222                        }
3223                        msleep(100);
3224                        links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3225                }
3226        } else {
3227                if (links_reg & IXGBE_LINKS_UP)
3228                        *link_up = true;
3229                else
3230                        *link_up = false;
3231        }
3232
3233        switch (links_reg & IXGBE_LINKS_SPEED_82599) {
3234        case IXGBE_LINKS_SPEED_10G_82599:
3235                if ((hw->mac.type >= ixgbe_mac_X550) &&
3236                    (links_reg & IXGBE_LINKS_SPEED_NON_STD))
3237                        *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
3238                else
3239                        *speed = IXGBE_LINK_SPEED_10GB_FULL;
3240                break;
3241        case IXGBE_LINKS_SPEED_1G_82599:
3242                *speed = IXGBE_LINK_SPEED_1GB_FULL;
3243                break;
3244        case IXGBE_LINKS_SPEED_100_82599:
3245                if ((hw->mac.type >= ixgbe_mac_X550) &&
3246                    (links_reg & IXGBE_LINKS_SPEED_NON_STD))
3247                        *speed = IXGBE_LINK_SPEED_5GB_FULL;
3248                else
3249                        *speed = IXGBE_LINK_SPEED_100_FULL;
3250                break;
3251        default:
3252                *speed = IXGBE_LINK_SPEED_UNKNOWN;
3253        }
3254
3255        return 0;
3256}
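
/* Hypothetical usage sketch: a caller that only needs a snapshot of link
 * state, without waiting for link-up, might do the following; the
 * function-pointer indirection mirrors how the driver normally calls it.
 *
 *	ixgbe_link_speed speed;
 *	bool link_up;
 *	s32 err;
 *
 *	err = hw->mac.ops.check_link(hw, &speed, &link_up, false);
 *	if (!err && link_up)
 *		hw_dbg(hw, "link up, speed mask %#x\n", speed);
 */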
3257
3258/**
3259 *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3260 *  the EEPROM
3261 *  @hw: pointer to hardware structure
3262 *  @wwnn_prefix: the alternative WWNN prefix
3263 *  @wwpn_prefix: the alternative WWPN prefix
3264 *
3265 *  This function will read the EEPROM from the alternative SAN MAC address
3266 *  block to check support for the alternative WWNN/WWPN prefix.
3267 **/
3268s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3269                                        u16 *wwpn_prefix)
3270{
3271        u16 offset, caps;
3272        u16 alt_san_mac_blk_offset;
3273
3274        /* clear output first */
3275        *wwnn_prefix = 0xFFFF;
3276        *wwpn_prefix = 0xFFFF;
3277
3278        /* check if alternative SAN MAC is supported */
3279        offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
3280        if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
3281                goto wwn_prefix_err;
3282
3283        if ((alt_san_mac_blk_offset == 0) ||
3284            (alt_san_mac_blk_offset == 0xFFFF))
3285                return 0;
3286
3287        /* check capability in alternative san mac address block */
3288        offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
3289        if (hw->eeprom.ops.read(hw, offset, &caps))
3290                goto wwn_prefix_err;
3291        if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
3292                return 0;
3293
3294        /* get the corresponding prefix for WWNN/WWPN */
3295        offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
3296        if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
3297                hw_err(hw, "eeprom read at offset %d failed\n", offset);
3298
3299        offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
3300        if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
3301                goto wwn_prefix_err;
3302
3303        return 0;
3304
3305wwn_prefix_err:
3306        hw_err(hw, "eeprom read at offset %d failed\n", offset);
3307        return 0;
3308}
3309
3310/**
3311 *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
3312 *  @hw: pointer to hardware structure
3313 *  @enable: enable or disable switch for MAC anti-spoofing
3314 *  @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
3315 *
3316 **/
3317void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3318{
3319        int vf_target_reg = vf >> 3;
3320        int vf_target_shift = vf % 8;
3321        u32 pfvfspoof;
3322
3323        if (hw->mac.type == ixgbe_mac_82598EB)
3324                return;
3325
3326        pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3327        if (enable)
3328                pfvfspoof |= BIT(vf_target_shift);
3329        else
3330                pfvfspoof &= ~BIT(vf_target_shift);
3331        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3332}
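
/* Worked example (illustrative, not from the original source): spoof-check
 * bits are packed eight VFs per PFVFSPOOF register, so enabling the check
 * for VF 10 touches PFVFSPOOF(10 >> 3) == PFVFSPOOF(1), bit (10 % 8) == 2:
 *
 *	ixgbe_set_mac_anti_spoofing(hw, true, 10);
 *
 * The VLAN variant below uses the same registers but offsets the bit
 * position by IXGBE_SPOOF_VLANAS_SHIFT.
 */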
3333
3334/**
3335 *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
3336 *  @hw: pointer to hardware structure
3337 *  @enable: enable or disable switch for VLAN anti-spoofing
3338 *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
3339 *
3340 **/
3341void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
3342{
3343        int vf_target_reg = vf >> 3;
3344        int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
3345        u32 pfvfspoof;
3346
3347        if (hw->mac.type == ixgbe_mac_82598EB)
3348                return;
3349
3350        pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3351        if (enable)
3352                pfvfspoof |= BIT(vf_target_shift);
3353        else
3354                pfvfspoof &= ~BIT(vf_target_shift);
3355        IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3356}
3357
3358/**
3359 *  ixgbe_get_device_caps_generic - Get additional device capabilities
3360 *  @hw: pointer to hardware structure
3361 *  @device_caps: the EEPROM word with the extra device capabilities
3362 *
3363 *  This function will read the EEPROM location for the device capabilities,
3364 *  and return the word through device_caps.
3365 **/
3366s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
3367{
3368        hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
3369
3370        return 0;
3371}
3372
3373/**
3374 * ixgbe_set_rxpba_generic - Initialize RX packet buffer
3375 * @hw: pointer to hardware structure
3376 * @num_pb: number of packet buffers to allocate
3377 * @headroom: reserve n KB of headroom
3378 * @strategy: packet buffer allocation strategy
3379 **/
3380void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
3381                             int num_pb,
3382                             u32 headroom,
3383                             int strategy)
3384{
3385        u32 pbsize = hw->mac.rx_pb_size;
3386        int i = 0;
3387        u32 rxpktsize, txpktsize, txpbthresh;
3388
3389        /* Reserve headroom */
3390        pbsize -= headroom;
3391
3392        if (!num_pb)
3393                num_pb = 1;
3394
3395        /* Divide remaining packet buffer space amongst the number
3396         * of packet buffers requested using supplied strategy.
3397         */
3398        switch (strategy) {
3399        case (PBA_STRATEGY_WEIGHTED):
3400                /* pba_80_48 strategy weights the first half of the packet
3401                 * buffers with 5/8 of the packet buffer space.
3402                 */
3403                rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8));
3404                pbsize -= rxpktsize * (num_pb / 2);
3405                rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
3406                for (; i < (num_pb / 2); i++)
3407                        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
3408                /* Fall through to configure remaining packet buffers */
3409        case (PBA_STRATEGY_EQUAL):
3410                /* Divide the remaining Rx packet buffer evenly among the TCs */
3411                rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
3412                for (; i < num_pb; i++)
3413                        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
3414                break;
3415        default:
3416                break;
3417        }
3418
3419        /*
3420         * Setup Tx packet buffer and threshold equally for all TCs
3421         * TXPBTHRESH register is set in K so divide by 1024 and subtract
3422         * 10 since the largest packet we support is just over 9K.
3423         */
3424        txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
3425        txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
3426        for (i = 0; i < num_pb; i++) {
3427                IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
3428                IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
3429        }
3430
3431        /* Clear unused TCs, if any, to zero buffer size */
3432        for (; i < IXGBE_MAX_PB; i++) {
3433                IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
3434                IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
3435                IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
3436        }
3437}
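
/* Worked example (assumed numbers, illustrative only): with the weighted
 * strategy, 8 packet buffers, no headroom and, say, a 512 KB Rx packet
 * buffer, the first four TCs each get (512 * 5 * 2) / (8 * 8) = 80 KB
 * (5/8 of the buffer in total) and the remaining 512 - 4 * 80 = 192 KB is
 * split equally, 48 KB per TC, across the last four TCs:
 *
 *	ixgbe_set_rxpba_generic(hw, 8, 0, PBA_STRATEGY_WEIGHTED);
 */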
3438
3439/**
3440 *  ixgbe_calculate_checksum - Calculate checksum for buffer
3441 *  @buffer: pointer to EEPROM
3442 *  @length: size of EEPROM to calculate a checksum for
3443 *
3444 *  Calculates the checksum of the buffer over the specified length and
3445 *  returns the result.
3446 **/
3447static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
3448{
3449        u32 i;
3450        u8 sum = 0;
3451
3452        if (!buffer)
3453                return 0;
3454
3455        for (i = 0; i < length; i++)
3456                sum += buffer[i];
3457
3458        return (u8) (0 - sum);
3459}
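
/* Illustrative note: the checksum is the two's complement of the byte sum,
 * so sum(buffer) + checksum == 0 (mod 256).  For example, the bytes
 * { 0x10, 0x20, 0x30 } sum to 0x60 and yield a checksum of 0xA0, since
 * 0x60 + 0xA0 == 0x100 == 0 (mod 256).  This lets the receiver validate a
 * command block by summing it back to zero.
 */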
3460
3461/**
3462 *  ixgbe_host_interface_command - Issue command to manageability block
3463 *  @hw: pointer to the HW structure
3464 *  @buffer: contains the command to write and where the return status will
3465 *           be placed
3466 *  @length: length of buffer, must be multiple of 4 bytes
3467 *  @timeout: time in ms to wait for command completion
3468 *  @return_data: read and return data from the buffer (true) or not (false)
3469 *  Needed because FW structures are big endian and decoding of
3470 *  these fields can be 8 bit or 16 bit, depending on the command.
3471 *  Decoding is not easily understood without making a table of
3472 *  commands, so we leave it up to the caller to read back the
3473 *  data in those cases.
3474 *
3475 *  Communicates with the manageability block.  On success return 0
3476 *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
3477 **/
3478s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
3479                                 u32 length, u32 timeout,
3480                                 bool return_data)
3481{
3482        u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
3483        u32 hicr, i, bi, fwsts;
3484        u16 buf_len, dword_len;
3485        union {
3486                struct ixgbe_hic_hdr hdr;
3487                u32 u32arr[1];
3488        } *bp = buffer;
3489        s32 status;
3490
3491        if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
3492                hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
3493                return IXGBE_ERR_HOST_INTERFACE_COMMAND;
3494        }
3495        /* Take management host interface semaphore */
3496        status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
3497        if (status)
3498                return status;
3499
3500        /* Set bit 9 of FWSTS clearing FW reset indication */
3501        fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
3502        IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
3503
3504        /* Check that the host interface is enabled. */
3505        hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3506        if (!(hicr & IXGBE_HICR_EN)) {
3507                hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
3508                status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3509                goto rel_out;
3510        }
3511
3512        /* Calculate length in DWORDs. We must be DWORD aligned */
3513        if (length % sizeof(u32)) {
3514                hw_dbg(hw, "Buffer length failure, not aligned to dword");
3515                status = IXGBE_ERR_INVALID_ARGUMENT;
3516                goto rel_out;
3517        }
3518
3519        dword_len = length >> 2;
3520
3521        /* The device driver writes the relevant command block
3522         * into the ram area.
3523         */
3524        for (i = 0; i < dword_len; i++)
3525                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
3526                                      i, cpu_to_le32(bp->u32arr[i]));
3527
3528        /* Setting this bit tells the ARC that a new command is pending. */
3529        IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
3530
3531        for (i = 0; i < timeout; i++) {
3532                hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
3533                if (!(hicr & IXGBE_HICR_C))
3534                        break;
3535                usleep_range(1000, 2000);
3536        }
3537
3538        /* Check command successful completion. */
3539        if ((timeout && i == timeout) ||
3540            !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
3541                hw_dbg(hw, "Command has failed with no status valid.\n");
3542                status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3543                goto rel_out;
3544        }
3545
3546        if (!return_data)
3547                goto rel_out;
3548
3549        /* Calculate length in DWORDs */
3550        dword_len = hdr_size >> 2;
3551
3552        /* first pull in the header so we know the buffer length */
3553        for (bi = 0; bi < dword_len; bi++) {
3554                bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3555                le32_to_cpus(&bp->u32arr[bi]);
3556        }
3557
3558        /* If there is anything in the data position, pull it in */
3559        buf_len = bp->hdr.buf_len;
3560        if (!buf_len)
3561                goto rel_out;
3562
3563        if (length < round_up(buf_len, 4) + hdr_size) {
3564                hw_dbg(hw, "Buffer not large enough for reply message.\n");
3565                status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3566                goto rel_out;
3567        }
3568
3569        /* Calculate length in DWORDs, add 3 for odd lengths */
3570        dword_len = (buf_len + 3) >> 2;
3571
3572        /* Pull in the rest of the buffer (bi is where we left off) */
3573        for (; bi <= dword_len; bi++) {
3574                bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
3575                le32_to_cpus(&bp->u32arr[bi]);
3576        }
3577
3578rel_out:
3579        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
3580
3581        return status;
3582}
3583
3584/**
3585 *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
3586 *  @hw: pointer to the HW structure
3587 *  @maj: driver version major number
3588 *  @min: driver version minor number
3589 *  @build: driver version build number
3590 *  @sub: driver version sub build number
3591 *
3592 *  Sends driver version number to firmware through the manageability
3593 *  block.  On success return 0
3594 *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
3595 *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
3596 **/
3597s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
3598                                 u8 build, u8 sub)
3599{
3600        struct ixgbe_hic_drv_info fw_cmd;
3601        int i;
3602        s32 ret_val;
3603
3604        fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
3605        fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
3606        fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
3607        fw_cmd.port_num = hw->bus.func;
3608        fw_cmd.ver_maj = maj;
3609        fw_cmd.ver_min = min;
3610        fw_cmd.ver_build = build;
3611        fw_cmd.ver_sub = sub;
3612        fw_cmd.hdr.checksum = 0;
3613        fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
3614                                (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
3615        fw_cmd.pad = 0;
3616        fw_cmd.pad2 = 0;
3617
3618        for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
3619                ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
3620                                                       sizeof(fw_cmd),
3621                                                       IXGBE_HI_COMMAND_TIMEOUT,
3622                                                       true);
3623                if (ret_val != 0)
3624                        continue;
3625
3626                if (fw_cmd.hdr.cmd_or_resp.ret_status ==
3627                    FW_CEM_RESP_STATUS_SUCCESS)
3628                        ret_val = 0;
3629                else
3630                        ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
3631
3632                break;
3633        }
3634
3635        return ret_val;
3636}
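
/* Hypothetical usage sketch (version numbers are made up): the driver init
 * path can report its version to the management firmware with a single
 * call, which retries the host interface command internally on failure:
 *
 *	if (ixgbe_set_fw_drv_ver_generic(hw, 4, 2, 1, 0))
 *		hw_dbg(hw, "FW did not accept driver version info\n");
 */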
3637
3638/**
3639 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
3640 * @hw: pointer to the hardware structure
3641 *
3642 * The 82599 and x540 MACs can experience issues if TX work is still pending
3643 * when a reset occurs.  This function prevents this by flushing the PCIe
3644 * buffers on the system.
3645 **/
3646void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
3647{
3648        u32 gcr_ext, hlreg0, i, poll;
3649        u16 value;
3650
3651        /*
3652         * If double reset is not requested then all transactions should
3653         * already be clear and as such there is no work to do
3654         */
3655        if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
3656                return;
3657
3658        /*
3659         * Set loopback enable to prevent any transmits from being sent
3660         * should the link come up.  This assumes that the RXCTRL.RXEN bit
3661         * has already been cleared.
3662         */
3663        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3664        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
3665
3666        /* wait for a last completion before clearing buffers */
3667        IXGBE_WRITE_FLUSH(hw);
3668        usleep_range(3000, 6000);
3669
3670        /* Before proceeding, make sure that the PCIe block does not have
3671         * transactions pending.
3672         */
3673        poll = ixgbe_pcie_timeout_poll(hw);
3674        for (i = 0; i < poll; i++) {
3675                usleep_range(100, 200);
3676                value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
3677                if (ixgbe_removed(hw->hw_addr))
3678                        break;
3679                if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3680                        break;
3681        }
3682
3683        /* initiate cleaning flow for buffers in the PCIe transaction layer */
3684        gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
3685        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
3686                        gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
3687
3688        /* Flush all writes and allow 20usec for all transactions to clear */
3689        IXGBE_WRITE_FLUSH(hw);
3690        udelay(20);
3691
3692        /* restore previous register values */
3693        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3694        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3695}
3696
3697static const u8 ixgbe_emc_temp_data[4] = {
3698        IXGBE_EMC_INTERNAL_DATA,
3699        IXGBE_EMC_DIODE1_DATA,
3700        IXGBE_EMC_DIODE2_DATA,
3701        IXGBE_EMC_DIODE3_DATA
3702};
3703static const u8 ixgbe_emc_therm_limit[4] = {
3704        IXGBE_EMC_INTERNAL_THERM_LIMIT,
3705        IXGBE_EMC_DIODE1_THERM_LIMIT,
3706        IXGBE_EMC_DIODE2_THERM_LIMIT,
3707        IXGBE_EMC_DIODE3_THERM_LIMIT
3708};
3709
3710/**
3711 *  ixgbe_get_ets_data - Extracts the ETS bit data
3712 *  @hw: pointer to hardware structure
3713 *  @ets_cfg: extracted ETS data
3714 *  @ets_offset: offset of ETS data
3715 *
3716 *  Returns error code.
3717 **/
3718static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
3719                              u16 *ets_offset)
3720{
3721        s32 status;
3722
3723        status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
3724        if (status)
3725                return status;
3726
3727        if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
3728                return IXGBE_NOT_IMPLEMENTED;
3729
3730        status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
3731        if (status)
3732                return status;
3733
3734        if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
3735                return IXGBE_NOT_IMPLEMENTED;
3736
3737        return 0;
3738}
3739
3740/**
3741 *  ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
3742 *  @hw: pointer to hardware structure
3743 *
3744 *  Reads the thermal sensor data into hw->mac.thermal_sensor_data
3745 **/
3746s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
3747{
3748        s32 status;
3749        u16 ets_offset;
3750        u16 ets_cfg;
3751        u16 ets_sensor;
3752        u8  num_sensors;
3753        u8  i;
3754        struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
3755
3756        /* Only support thermal sensors attached to physical port 0 */
3757        if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
3758                return IXGBE_NOT_IMPLEMENTED;
3759
3760        status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3761        if (status)
3762                return status;
3763
3764        num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
3765        if (num_sensors > IXGBE_MAX_SENSORS)
3766                num_sensors = IXGBE_MAX_SENSORS;
3767
3768        for (i = 0; i < num_sensors; i++) {
3769                u8  sensor_index;
3770                u8  sensor_location;
3771
3772                status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
3773                                             &ets_sensor);
3774                if (status)
3775                        return status;
3776
3777                sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3778                                IXGBE_ETS_DATA_INDEX_SHIFT);
3779                sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
3780                                   IXGBE_ETS_DATA_LOC_SHIFT);
3781
3782                if (sensor_location != 0) {
3783                        status = hw->phy.ops.read_i2c_byte(hw,
3784                                        ixgbe_emc_temp_data[sensor_index],
3785                                        IXGBE_I2C_THERMAL_SENSOR_ADDR,
3786                                        &data->sensor[i].temp);
3787                        if (status)
3788                                return status;
3789                }
3790        }
3791
3792        return 0;
3793}
3794
3795/**
3796 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
3797 * @hw: pointer to hardware structure
3798 *
3799 * Inits the thermal sensor thresholds according to the NVM map
3800 * and saves off the threshold and location values into mac.thermal_sensor_data
3801 **/
3802s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
3803{
3804        s32 status;
3805        u16 ets_offset;
3806        u16 ets_cfg;
3807        u16 ets_sensor;
3808        u8  low_thresh_delta;
3809        u8  num_sensors;
3810        u8  therm_limit;
3811        u8  i;
3812        struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
3813
3814        memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
3815
3816        /* Only support thermal sensors attached to physical port 0 */
3817        if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
3818                return IXGBE_NOT_IMPLEMENTED;
3819
3820        status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3821        if (status)
3822                return status;
3823
3824        low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
3825                             IXGBE_ETS_LTHRES_DELTA_SHIFT);
3826        num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
3827        if (num_sensors > IXGBE_MAX_SENSORS)
3828                num_sensors = IXGBE_MAX_SENSORS;
3829
3830        for (i = 0; i < num_sensors; i++) {
3831                u8  sensor_index;
3832                u8  sensor_location;
3833
3834                if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
3835                        hw_err(hw, "eeprom read at offset %d failed\n",
3836                               ets_offset + 1 + i);
3837                        continue;
3838                }
3839                sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3840                                IXGBE_ETS_DATA_INDEX_SHIFT);
3841                sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
3842                                   IXGBE_ETS_DATA_LOC_SHIFT);
3843                therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
3844
3845                hw->phy.ops.write_i2c_byte(hw,
3846                        ixgbe_emc_therm_limit[sensor_index],
3847                        IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
3848
3849                if (sensor_location == 0)
3850                        continue;
3851
3852                data->sensor[i].location = sensor_location;
3853                data->sensor[i].caution_thresh = therm_limit;
3854                data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
3855        }
3856
3857        return 0;
3858}
3859
3860void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
3861{
3862        u32 rxctrl;
3863
3864        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3865        if (rxctrl & IXGBE_RXCTRL_RXEN) {
3866                if (hw->mac.type != ixgbe_mac_82598EB) {
3867                        u32 pfdtxgswc;
3868
3869                        pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3870                        if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
3871                                pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
3872                                IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3873                                hw->mac.set_lben = true;
3874                        } else {
3875                                hw->mac.set_lben = false;
3876                        }
3877                }
3878                rxctrl &= ~IXGBE_RXCTRL_RXEN;
3879                IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
3880        }
3881}
3882
3883void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
3884{
3885        u32 rxctrl;
3886
3887        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3888        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
3889
3890        if (hw->mac.type != ixgbe_mac_82598EB) {
3891                if (hw->mac.set_lben) {
3892                        u32 pfdtxgswc;
3893
3894                        pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
3895                        pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
3896                        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
3897                        hw->mac.set_lben = false;
3898                }
3899        }
3900}
3901
3902/** ixgbe_mng_present - returns true when management capability is present
3903 * @hw: pointer to hardware structure
3904 **/
3905bool ixgbe_mng_present(struct ixgbe_hw *hw)
3906{
3907        u32 fwsm;
3908
3909        if (hw->mac.type < ixgbe_mac_82599EB)
3910                return false;
3911
3912        fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
3913        fwsm &= IXGBE_FWSM_MODE_MASK;
3914        return fwsm == IXGBE_FWSM_FW_MODE_PT;
3915}
3916
3917/**
3918 *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
3919 *  @hw: pointer to hardware structure
3920 *  @speed: new link speed
3921 *  @autoneg_wait_to_complete: true when waiting for completion is needed
3922 *
3923 *  Set the link speed in the MAC and/or PHY register and restarts link.
3924 */
3925s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
3926                                          ixgbe_link_speed speed,
3927                                          bool autoneg_wait_to_complete)
3928{
3929        ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3930        ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
3931        s32 status = 0;
3932        u32 speedcnt = 0;
3933        u32 i = 0;
3934        bool autoneg, link_up = false;
3935
3936        /* Mask off requested but non-supported speeds */
3937        status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
3938        if (status)
3939                return status;
3940
3941        speed &= link_speed;
3942
3943        /* Try each speed one by one, highest priority first.  We do this in
3944         * software because 10Gb fiber doesn't support speed autonegotiation.
3945         */
3946        if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
3947                speedcnt++;
3948                highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
3949
3950                /* If we already have link at this speed, just jump out */
3951                status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
3952                                                false);
3953                if (status)
3954                        return status;
3955
3956                if (link_speed == IXGBE_LINK_SPEED_10GB_FULL && link_up)
3957                        goto out;
3958
3959                /* Set the module link speed */
3960                switch (hw->phy.media_type) {
3961                case ixgbe_media_type_fiber:
3962                        hw->mac.ops.set_rate_select_speed(hw,
3963                                                    IXGBE_LINK_SPEED_10GB_FULL);
3964                        break;
3965                case ixgbe_media_type_fiber_qsfp:
3966                        /* QSFP module automatically detects MAC link speed */
3967                        break;
3968                default:
3969                        hw_dbg(hw, "Unexpected media type\n");
3970                        break;
3971                }
3972
3973                /* Allow module to change analog characteristics (1G->10G) */
3974                msleep(40);
3975
3976                status = hw->mac.ops.setup_mac_link(hw,
3977                                                    IXGBE_LINK_SPEED_10GB_FULL,
3978                                                    autoneg_wait_to_complete);
3979                if (status)
3980                        return status;
3981
3982                /* Flap the Tx laser if it has not already been done */
3983                if (hw->mac.ops.flap_tx_laser)
3984                        hw->mac.ops.flap_tx_laser(hw);
3985
3986                /* Wait for the controller to acquire link.  Per IEEE 802.3ap,
3987                 * Section 73.10.2, we may have to wait up to 500ms if KR is
3988                 * attempted.  82599 uses the same timing for 10g SFI.
3989                 */
3990                for (i = 0; i < 5; i++) {
3991                        /* Wait for the link partner to also set speed */
3992                        msleep(100);
3993
3994                        /* If we have link, just jump out */
3995                        status = hw->mac.ops.check_link(hw, &link_speed,
3996                                                        &link_up, false);
3997                        if (status)
3998                                return status;
3999
4000                        if (link_up)
4001                                goto out;
4002                }
4003        }
4004
4005        if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
4006                speedcnt++;
4007                if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
4008                        highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
4009
4010                /* If we already have link at this speed, just jump out */
4011                status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
4012                                                false);
4013                if (status)
4014                        return status;
4015
4016                if (link_speed == IXGBE_LINK_SPEED_1GB_FULL && link_up)
4017                        goto out;
4018
4019                /* Set the module link speed */
4020                switch (hw->phy.media_type) {
4021                case ixgbe_media_type_fiber:
4022                        hw->mac.ops.set_rate_select_speed(hw,
4023                                                     IXGBE_LINK_SPEED_1GB_FULL);
4024                        break;
4025                case ixgbe_media_type_fiber_qsfp:
4026                        /* QSFP module automatically detects link speed */
4027                        break;
4028                default:
4029                        hw_dbg(hw, "Unexpected media type\n");
4030                        break;
4031                }
4032
4033                /* Allow module to change analog characteristics (10G->1G) */
4034                msleep(40);
4035
4036                status = hw->mac.ops.setup_mac_link(hw,
4037                                                    IXGBE_LINK_SPEED_1GB_FULL,
4038                                                    autoneg_wait_to_complete);
4039                if (status)
4040                        return status;
4041
4042                /* Flap the Tx laser if it has not already been done */
4043                if (hw->mac.ops.flap_tx_laser)
4044                        hw->mac.ops.flap_tx_laser(hw);
4045
4046                /* Wait for the link partner to also set speed */
4047                msleep(100);
4048
4049                /* If we have link, just jump out */
4050                status = hw->mac.ops.check_link(hw, &link_speed, &link_up,
4051                                                false);
4052                if (status)
4053                        return status;
4054
4055                if (link_up)
4056                        goto out;
4057        }
4058
4059        /* We didn't get link.  Configure back to the highest speed we tried,
4060         * (if there was more than one).  We call ourselves back with just the
4061         * single highest speed that the user requested.
4062         */
4063        if (speedcnt > 1)
4064                status = ixgbe_setup_mac_link_multispeed_fiber(hw,
4065                                                      highest_link_speed,
4066                                                      autoneg_wait_to_complete);
4067
4068out:
4069        /* Set autoneg_advertised value based on input link speed */
4070        hw->phy.autoneg_advertised = 0;
4071
4072        if (speed & IXGBE_LINK_SPEED_10GB_FULL)
4073                hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
4074
4075        if (speed & IXGBE_LINK_SPEED_1GB_FULL)
4076                hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
4077
4078        return status;
4079}
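
/* Hypothetical usage sketch: requesting both fiber speeds lets the routine
 * try 10G first and fall back to 1G if no link is established:
 *
 *	status = ixgbe_setup_mac_link_multispeed_fiber(hw,
 *				IXGBE_LINK_SPEED_10GB_FULL |
 *				IXGBE_LINK_SPEED_1GB_FULL,
 *				true);
 */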
4080
4081/**
4082 *  ixgbe_set_soft_rate_select_speed - Set module link speed
4083 *  @hw: pointer to hardware structure
4084 *  @speed: link speed to set
4085 *
4086 *  Set module link speed via the soft rate select.
4087 */
4088void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
4089                                      ixgbe_link_speed speed)
4090{
4091        s32 status;
4092        u8 rs, eeprom_data;
4093
4094        switch (speed) {
4095        case IXGBE_LINK_SPEED_10GB_FULL:
4096                /* one-bit mask, same as setting the bit on */
4097                rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
4098                break;
4099        case IXGBE_LINK_SPEED_1GB_FULL:
4100                rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
4101                break;
4102        default:
4103                hw_dbg(hw, "Invalid fixed module speed\n");
4104                return;
4105        }
4106
4107        /* Set RS0 */
4108        status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
4109                                           IXGBE_I2C_EEPROM_DEV_ADDR2,
4110                                           &eeprom_data);
4111        if (status) {
4112                hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
4113                return;
4114        }
4115
4116        eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
4117
4118        status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
4119                                            IXGBE_I2C_EEPROM_DEV_ADDR2,
4120                                            eeprom_data);
4121        if (status) {
4122                hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
4123                return;
4124        }
4125}
4126