linux/drivers/net/ethernet/intel/e1000e/ich8lan.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Intel PRO/1000 Linux driver
   3 * Copyright(c) 1999 - 2015 Intel Corporation.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * The full GNU General Public License is included in this distribution in
  15 * the file called "COPYING".
  16 *
  17 * Contact Information:
  18 * Linux NICS <linux.nics@intel.com>
  19 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  20 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  21 */
  22
  23/* 82562G 10/100 Network Connection
  24 * 82562G-2 10/100 Network Connection
  25 * 82562GT 10/100 Network Connection
  26 * 82562GT-2 10/100 Network Connection
  27 * 82562V 10/100 Network Connection
  28 * 82562V-2 10/100 Network Connection
  29 * 82566DC-2 Gigabit Network Connection
  30 * 82566DC Gigabit Network Connection
  31 * 82566DM-2 Gigabit Network Connection
  32 * 82566DM Gigabit Network Connection
  33 * 82566MC Gigabit Network Connection
  34 * 82566MM Gigabit Network Connection
  35 * 82567LM Gigabit Network Connection
  36 * 82567LF Gigabit Network Connection
  37 * 82567V Gigabit Network Connection
  38 * 82567LM-2 Gigabit Network Connection
  39 * 82567LF-2 Gigabit Network Connection
  40 * 82567V-2 Gigabit Network Connection
  41 * 82567LF-3 Gigabit Network Connection
  42 * 82567LM-3 Gigabit Network Connection
  43 * 82567LM-4 Gigabit Network Connection
  44 * 82577LM Gigabit Network Connection
  45 * 82577LC Gigabit Network Connection
  46 * 82578DM Gigabit Network Connection
  47 * 82578DC Gigabit Network Connection
  48 * 82579LM Gigabit Network Connection
  49 * 82579V Gigabit Network Connection
  50 * Ethernet Connection I217-LM
  51 * Ethernet Connection I217-V
  52 * Ethernet Connection I218-V
  53 * Ethernet Connection I218-LM
  54 * Ethernet Connection (2) I218-LM
  55 * Ethernet Connection (2) I218-V
  56 * Ethernet Connection (3) I218-LM
  57 * Ethernet Connection (3) I218-V
  58 */
  59
  60#include "e1000.h"
  61
  62/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
  63/* Offset 04h HSFSTS */
  64union ich8_hws_flash_status {
  65        struct ich8_hsfsts {
  66                u16 flcdone:1;  /* bit 0 Flash Cycle Done */
  67                u16 flcerr:1;   /* bit 1 Flash Cycle Error */
  68                u16 dael:1;     /* bit 2 Direct Access error Log */
  69                u16 berasesz:2; /* bit 4:3 Sector Erase Size */
  70                u16 flcinprog:1;        /* bit 5 flash cycle in Progress */
   71                u16 reserved1:2;        /* bit 7:6 Reserved */
   72                u16 reserved2:6;        /* bit 13:8 Reserved */
  73                u16 fldesvalid:1;       /* bit 14 Flash Descriptor Valid */
  74                u16 flockdn:1;  /* bit 15 Flash Config Lock-Down */
  75        } hsf_status;
  76        u16 regval;
  77};
  78
  79/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
  80/* Offset 06h FLCTL */
  81union ich8_hws_flash_ctrl {
  82        struct ich8_hsflctl {
  83                u16 flcgo:1;    /* 0 Flash Cycle Go */
  84                u16 flcycle:2;  /* 2:1 Flash Cycle */
  85                u16 reserved:5; /* 7:3 Reserved  */
  86                u16 fldbcount:2;        /* 9:8 Flash Data Byte Count */
  87                u16 flockdn:6;  /* 15:10 Reserved */
  88        } hsf_ctrl;
  89        u16 regval;
  90};
  91
  92/* ICH Flash Region Access Permissions */
  93union ich8_hws_flash_regacc {
  94        struct ich8_flracc {
  95                u32 grra:8;     /* 0:7 GbE region Read Access */
  96                u32 grwa:8;     /* 8:15 GbE region Write Access */
  97                u32 gmrag:8;    /* 23:16 GbE Master Read Access Grant */
  98                u32 gmwag:8;    /* 31:24 GbE Master Write Access Grant */
  99        } hsf_flregacc;
  100        u32 regval;
 101};
 102
 103/* ICH Flash Protected Region */
 104union ich8_flash_protected_range {
 105        struct ich8_pr {
 106                u32 base:13;    /* 0:12 Protected Range Base */
 107                u32 reserved1:2;        /* 13:14 Reserved */
 108                u32 rpe:1;      /* 15 Read Protection Enable */
 109                u32 limit:13;   /* 16:28 Protected Range Limit */
 110                u32 reserved2:2;        /* 29:30 Reserved */
 111                u32 wpe:1;      /* 31 Write Protection Enable */
 112        } range;
 113        u32 regval;
 114};
 115
 116static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
 117static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
 118static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
 119static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
 120                                                u32 offset, u8 byte);
 121static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
 122                                         u8 *data);
 123static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
 124                                         u16 *data);
 125static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 126                                         u8 size, u16 *data);
 127static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
 128                                           u32 *data);
 129static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
 130                                          u32 offset, u32 *data);
 131static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
 132                                            u32 offset, u32 data);
 133static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
 134                                                 u32 offset, u32 dword);
 135static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
 136static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
 137static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
 138static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
 139static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
 140static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
 141static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
 142static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
 143static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
 144static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
 145static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
 146static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
 147static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
 148static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 149static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
 150static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
 151static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
 152static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
 153static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
 154static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 155static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
 156static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
 157static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
 158static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
 159
 160static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 161{
 162        return readw(hw->flash_address + reg);
 163}
 164
 165static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
 166{
 167        return readl(hw->flash_address + reg);
 168}
 169
 170static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
 171{
 172        writew(val, hw->flash_address + reg);
 173}
 174
 175static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
 176{
 177        writel(val, hw->flash_address + reg);
 178}
 179
 180#define er16flash(reg)          __er16flash(hw, (reg))
 181#define er32flash(reg)          __er32flash(hw, (reg))
 182#define ew16flash(reg, val)     __ew16flash(hw, (reg), (val))
 183#define ew32flash(reg, val)     __ew32flash(hw, (reg), (val))
 184
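/* Illustrative sketch only, not part of the driver flow: the flash status
 * union above is normally read and written through the er16flash()/
 * ew16flash() accessors defined here, using a register offset (such as
 * ICH_FLASH_HSFSTS, assumed to be defined in ich8lan.h) relative to the
 * mapped flash BAR.
 */
static inline bool e1000_example_flash_cycle_done(struct e1000_hw *hw)
{
        union ich8_hws_flash_status hsfsts;

        /* Read the 16-bit Hardware Sequencing Flash Status register */
        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

        /* FLCDONE (bit 0) is set by hardware when a flash cycle completes */
        return hsfsts.hsf_status.flcdone;
}
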
 185/**
 186 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 187 *  @hw: pointer to the HW structure
 188 *
 189 *  Test access to the PHY registers by reading the PHY ID registers.  If
 190 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 191 *  otherwise assume the read PHY ID is correct if it is valid.
 192 *
 193 *  Assumes the sw/fw/hw semaphore is already acquired.
 194 **/
 195static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 196{
 197        u16 phy_reg = 0;
 198        u32 phy_id = 0;
 199        s32 ret_val = 0;
 200        u16 retry_count;
 201        u32 mac_reg = 0;
 202
 203        for (retry_count = 0; retry_count < 2; retry_count++) {
 204                ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
 205                if (ret_val || (phy_reg == 0xFFFF))
 206                        continue;
 207                phy_id = (u32)(phy_reg << 16);
 208
 209                ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
 210                if (ret_val || (phy_reg == 0xFFFF)) {
 211                        phy_id = 0;
 212                        continue;
 213                }
 214                phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
 215                break;
 216        }
 217
 218        if (hw->phy.id) {
 219                if (hw->phy.id == phy_id)
 220                        goto out;
 221        } else if (phy_id) {
 222                hw->phy.id = phy_id;
 223                hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
 224                goto out;
 225        }
 226
 227        /* In case the PHY needs to be in mdio slow mode,
 228         * set slow mode and try to get the PHY id again.
 229         */
 230        if (hw->mac.type < e1000_pch_lpt) {
 231                hw->phy.ops.release(hw);
 232                ret_val = e1000_set_mdio_slow_mode_hv(hw);
 233                if (!ret_val)
 234                        ret_val = e1000e_get_phy_id(hw);
 235                hw->phy.ops.acquire(hw);
 236        }
 237
 238        if (ret_val)
 239                return false;
 240out:
 241        if (hw->mac.type >= e1000_pch_lpt) {
 242                /* Only unforce SMBus if ME is not active */
 243                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
 244                        /* Unforce SMBus mode in PHY */
 245                        e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
 246                        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
 247                        e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
 248
 249                        /* Unforce SMBus mode in MAC */
 250                        mac_reg = er32(CTRL_EXT);
 251                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
 252                        ew32(CTRL_EXT, mac_reg);
 253                }
 254        }
 255
 256        return true;
 257}
 258
 259/**
 260 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 261 *  @hw: pointer to the HW structure
 262 *
 263 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 264 *  used to reset the PHY to a quiescent state when necessary.
 265 **/
 266static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
 267{
 268        u32 mac_reg;
 269
 270        /* Set Phy Config Counter to 50msec */
 271        mac_reg = er32(FEXTNVM3);
 272        mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
 273        mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
 274        ew32(FEXTNVM3, mac_reg);
 275
 276        /* Toggle LANPHYPC Value bit */
 277        mac_reg = er32(CTRL);
 278        mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
 279        mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
 280        ew32(CTRL, mac_reg);
 281        e1e_flush();
 282        usleep_range(10, 20);
 283        mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
 284        ew32(CTRL, mac_reg);
 285        e1e_flush();
 286
 287        if (hw->mac.type < e1000_pch_lpt) {
 288                msleep(50);
 289        } else {
 290                u16 count = 20;
 291
 292                do {
 293                        usleep_range(5000, 10000);
 294                } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
 295
 296                msleep(30);
 297        }
 298}
 299
 300/**
 301 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 302 *  @hw: pointer to the HW structure
 303 *
 304 *  Workarounds/flow necessary for PHY initialization during driver load
 305 *  and resume paths.
 306 **/
 307static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 308{
 310        u32 mac_reg, fwsm = er32(FWSM);
 311        s32 ret_val;
 312
 313        /* Gate automatic PHY configuration by hardware on managed and
 314         * non-managed 82579 and newer adapters.
 315         */
 316        e1000_gate_hw_phy_config_ich8lan(hw, true);
 317
 318        /* It is not possible to be certain of the current state of ULP
 319         * so forcibly disable it.
 320         */
 321        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
 322        e1000_disable_ulp_lpt_lp(hw, true);
 323
 324        ret_val = hw->phy.ops.acquire(hw);
 325        if (ret_val) {
 326                e_dbg("Failed to initialize PHY flow\n");
 327                goto out;
 328        }
 329
 330        /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
 331         * inaccessible and resetting the PHY is not blocked, toggle the
 332         * LANPHYPC Value bit to force the interconnect to PCIe mode.
 333         */
 334        switch (hw->mac.type) {
 335        case e1000_pch_lpt:
 336        case e1000_pch_spt:
 337        case e1000_pch_cnp:
 338                if (e1000_phy_is_accessible_pchlan(hw))
 339                        break;
 340
 341                /* Before toggling LANPHYPC, see if PHY is accessible by
 342                 * forcing MAC to SMBus mode first.
 343                 */
 344                mac_reg = er32(CTRL_EXT);
 345                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
 346                ew32(CTRL_EXT, mac_reg);
 347
 348                /* Wait 50 milliseconds for MAC to finish any retries
 349                 * that it might be trying to perform from previous
 350                 * attempts to acknowledge any phy read requests.
 351                 */
 352                msleep(50);
 353
 354                /* fall-through */
 355        case e1000_pch2lan:
 356                if (e1000_phy_is_accessible_pchlan(hw))
 357                        break;
 358
 359                /* fall-through */
 360        case e1000_pchlan:
 361                if ((hw->mac.type == e1000_pchlan) &&
 362                    (fwsm & E1000_ICH_FWSM_FW_VALID))
 363                        break;
 364
 365                if (hw->phy.ops.check_reset_block(hw)) {
 366                        e_dbg("Required LANPHYPC toggle blocked by ME\n");
 367                        ret_val = -E1000_ERR_PHY;
 368                        break;
 369                }
 370
 371                /* Toggle LANPHYPC Value bit */
 372                e1000_toggle_lanphypc_pch_lpt(hw);
 373                if (hw->mac.type >= e1000_pch_lpt) {
 374                        if (e1000_phy_is_accessible_pchlan(hw))
 375                                break;
 376
 377                        /* Toggling LANPHYPC brings the PHY out of SMBus mode
 378                         * so ensure that the MAC is also out of SMBus mode
 379                         */
 380                        mac_reg = er32(CTRL_EXT);
 381                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
 382                        ew32(CTRL_EXT, mac_reg);
 383
 384                        if (e1000_phy_is_accessible_pchlan(hw))
 385                                break;
 386
 387                        ret_val = -E1000_ERR_PHY;
 388                }
 389                break;
 390        default:
 391                break;
 392        }
 393
 394        hw->phy.ops.release(hw);
 395        if (!ret_val) {
 396
 397                /* Check to see if able to reset PHY.  Print error if not */
 398                if (hw->phy.ops.check_reset_block(hw)) {
 399                        e_err("Reset blocked by ME\n");
 400                        goto out;
 401                }
 402
 403                /* Reset the PHY before any access to it.  Doing so, ensures
 404                 * that the PHY is in a known good state before we read/write
 405                 * PHY registers.  The generic reset is sufficient here,
 406                 * because we haven't determined the PHY type yet.
 407                 */
 408                ret_val = e1000e_phy_hw_reset_generic(hw);
 409                if (ret_val)
 410                        goto out;
 411
 412                /* On a successful reset, possibly need to wait for the PHY
 413                 * to quiesce to an accessible state before returning control
 414                 * to the calling function.  If the PHY does not quiesce, then
  415                 * return E1000_BLK_PHY_RESET, as this is the condition that
  416                 * the PHY is in.
 417                 */
 418                ret_val = hw->phy.ops.check_reset_block(hw);
 419                if (ret_val)
 420                        e_err("ME blocked access to PHY after reset\n");
 421        }
 422
 423out:
 424        /* Ungate automatic PHY configuration on non-managed 82579 */
 425        if ((hw->mac.type == e1000_pch2lan) &&
 426            !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
 427                usleep_range(10000, 20000);
 428                e1000_gate_hw_phy_config_ich8lan(hw, false);
 429        }
 430
 431        return ret_val;
 432}
 433
 434/**
 435 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 436 *  @hw: pointer to the HW structure
 437 *
 438 *  Initialize family-specific PHY parameters and function pointers.
 439 **/
 440static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 441{
 442        struct e1000_phy_info *phy = &hw->phy;
 443        s32 ret_val;
 444
 445        phy->addr = 1;
 446        phy->reset_delay_us = 100;
 447
 448        phy->ops.set_page = e1000_set_page_igp;
 449        phy->ops.read_reg = e1000_read_phy_reg_hv;
 450        phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
 451        phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
 452        phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
 453        phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
 454        phy->ops.write_reg = e1000_write_phy_reg_hv;
 455        phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
 456        phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
 457        phy->ops.power_up = e1000_power_up_phy_copper;
 458        phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
 459        phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 460
 461        phy->id = e1000_phy_unknown;
 462
 463        ret_val = e1000_init_phy_workarounds_pchlan(hw);
 464        if (ret_val)
 465                return ret_val;
 466
 467        if (phy->id == e1000_phy_unknown)
 468                switch (hw->mac.type) {
 469                default:
 470                        ret_val = e1000e_get_phy_id(hw);
 471                        if (ret_val)
 472                                return ret_val;
 473                        if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
 474                                break;
 475                        /* fall-through */
 476                case e1000_pch2lan:
 477                case e1000_pch_lpt:
 478                case e1000_pch_spt:
 479                case e1000_pch_cnp:
 480                        /* In case the PHY needs to be in mdio slow mode,
 481                         * set slow mode and try to get the PHY id again.
 482                         */
 483                        ret_val = e1000_set_mdio_slow_mode_hv(hw);
 484                        if (ret_val)
 485                                return ret_val;
 486                        ret_val = e1000e_get_phy_id(hw);
 487                        if (ret_val)
 488                                return ret_val;
 489                        break;
 490                }
 491        phy->type = e1000e_get_phy_type_from_id(phy->id);
 492
 493        switch (phy->type) {
 494        case e1000_phy_82577:
 495        case e1000_phy_82579:
 496        case e1000_phy_i217:
 497                phy->ops.check_polarity = e1000_check_polarity_82577;
 498                phy->ops.force_speed_duplex =
 499                    e1000_phy_force_speed_duplex_82577;
 500                phy->ops.get_cable_length = e1000_get_cable_length_82577;
 501                phy->ops.get_info = e1000_get_phy_info_82577;
 502                phy->ops.commit = e1000e_phy_sw_reset;
 503                break;
 504        case e1000_phy_82578:
 505                phy->ops.check_polarity = e1000_check_polarity_m88;
 506                phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
 507                phy->ops.get_cable_length = e1000e_get_cable_length_m88;
 508                phy->ops.get_info = e1000e_get_phy_info_m88;
 509                break;
 510        default:
 511                ret_val = -E1000_ERR_PHY;
 512                break;
 513        }
 514
 515        return ret_val;
 516}
 517
 518/**
 519 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 520 *  @hw: pointer to the HW structure
 521 *
 522 *  Initialize family-specific PHY parameters and function pointers.
 523 **/
 524static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
 525{
 526        struct e1000_phy_info *phy = &hw->phy;
 527        s32 ret_val;
 528        u16 i = 0;
 529
 530        phy->addr = 1;
 531        phy->reset_delay_us = 100;
 532
 533        phy->ops.power_up = e1000_power_up_phy_copper;
 534        phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
 535
 536        /* We may need to do this twice - once for IGP and if that fails,
 537         * we'll set BM func pointers and try again
 538         */
 539        ret_val = e1000e_determine_phy_address(hw);
 540        if (ret_val) {
 541                phy->ops.write_reg = e1000e_write_phy_reg_bm;
 542                phy->ops.read_reg = e1000e_read_phy_reg_bm;
 543                ret_val = e1000e_determine_phy_address(hw);
 544                if (ret_val) {
 545                        e_dbg("Cannot determine PHY addr. Erroring out\n");
 546                        return ret_val;
 547                }
 548        }
 549
 550        phy->id = 0;
 551        while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
 552               (i++ < 100)) {
 553                usleep_range(1000, 2000);
 554                ret_val = e1000e_get_phy_id(hw);
 555                if (ret_val)
 556                        return ret_val;
 557        }
 558
 559        /* Verify phy id */
 560        switch (phy->id) {
 561        case IGP03E1000_E_PHY_ID:
 562                phy->type = e1000_phy_igp_3;
 563                phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 564                phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
 565                phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
 566                phy->ops.get_info = e1000e_get_phy_info_igp;
 567                phy->ops.check_polarity = e1000_check_polarity_igp;
 568                phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
 569                break;
 570        case IFE_E_PHY_ID:
 571        case IFE_PLUS_E_PHY_ID:
 572        case IFE_C_E_PHY_ID:
 573                phy->type = e1000_phy_ife;
 574                phy->autoneg_mask = E1000_ALL_NOT_GIG;
 575                phy->ops.get_info = e1000_get_phy_info_ife;
 576                phy->ops.check_polarity = e1000_check_polarity_ife;
 577                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
 578                break;
 579        case BME1000_E_PHY_ID:
 580                phy->type = e1000_phy_bm;
 581                phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 582                phy->ops.read_reg = e1000e_read_phy_reg_bm;
 583                phy->ops.write_reg = e1000e_write_phy_reg_bm;
 584                phy->ops.commit = e1000e_phy_sw_reset;
 585                phy->ops.get_info = e1000e_get_phy_info_m88;
 586                phy->ops.check_polarity = e1000_check_polarity_m88;
 587                phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
 588                break;
 589        default:
 590                return -E1000_ERR_PHY;
 591        }
 592
 593        return 0;
 594}
 595
 596/**
 597 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 598 *  @hw: pointer to the HW structure
 599 *
 600 *  Initialize family-specific NVM parameters and function
 601 *  pointers.
 602 **/
 603static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 604{
 605        struct e1000_nvm_info *nvm = &hw->nvm;
 606        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 607        u32 gfpreg, sector_base_addr, sector_end_addr;
 608        u16 i;
 609        u32 nvm_size;
 610
 611        nvm->type = e1000_nvm_flash_sw;
 612
 613        if (hw->mac.type >= e1000_pch_spt) {
 614                /* in SPT, gfpreg doesn't exist. NVM size is taken from the
 615                 * STRAP register. This is because in SPT the GbE Flash region
 616                 * is no longer accessed through the flash registers. Instead,
 617                 * the mechanism has changed, and the Flash region access
 618                 * registers are now implemented in GbE memory space.
 619                 */
 620                nvm->flash_base_addr = 0;
 621                nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
 622                    * NVM_SIZE_MULTIPLIER;
 623                nvm->flash_bank_size = nvm_size / 2;
 624                /* Adjust to word count */
 625                nvm->flash_bank_size /= sizeof(u16);
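                /* Worked example (illustrative): if STRAP[5:1] reads 7, the
                 * NVM is (7 + 1) * NVM_SIZE_MULTIPLIER bytes.  Assuming
                 * NVM_SIZE_MULTIPLIER is 4096 (4 KB), that is 32 KB of flash,
                 * i.e. a 16 KB bank, or 8192 16-bit words after the two
                 * adjustments above.
                 */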
 626                /* Set the base address for flash register access */
 627                hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
 628        } else {
 629                /* Can't read flash registers if register set isn't mapped. */
 630                if (!hw->flash_address) {
 631                        e_dbg("ERROR: Flash registers not mapped\n");
 632                        return -E1000_ERR_CONFIG;
 633                }
 634
 635                gfpreg = er32flash(ICH_FLASH_GFPREG);
 636
 637                /* sector_X_addr is a "sector"-aligned address (4096 bytes)
 638                 * Add 1 to sector_end_addr since this sector is included in
 639                 * the overall size.
 640                 */
 641                sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
 642                sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
 643
 644                /* flash_base_addr is byte-aligned */
 645                nvm->flash_base_addr = sector_base_addr
 646                    << FLASH_SECTOR_ADDR_SHIFT;
 647
 648                /* find total size of the NVM, then cut in half since the total
 649                 * size represents two separate NVM banks.
 650                 */
 651                nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
 652                                        << FLASH_SECTOR_ADDR_SHIFT);
 653                nvm->flash_bank_size /= 2;
 654                /* Adjust to word count */
 655                nvm->flash_bank_size /= sizeof(u16);
 656        }
 657
 658        nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
 659
 660        /* Clear shadow ram */
 661        for (i = 0; i < nvm->word_size; i++) {
 662                dev_spec->shadow_ram[i].modified = false;
 663                dev_spec->shadow_ram[i].value = 0xFFFF;
 664        }
 665
 666        return 0;
 667}
 668
 669/**
 670 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 671 *  @hw: pointer to the HW structure
 672 *
 673 *  Initialize family-specific MAC parameters and function
 674 *  pointers.
 675 **/
 676static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
 677{
 678        struct e1000_mac_info *mac = &hw->mac;
 679
 680        /* Set media type function pointer */
 681        hw->phy.media_type = e1000_media_type_copper;
 682
 683        /* Set mta register count */
 684        mac->mta_reg_count = 32;
 685        /* Set rar entry count */
 686        mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
 687        if (mac->type == e1000_ich8lan)
 688                mac->rar_entry_count--;
 689        /* FWSM register */
 690        mac->has_fwsm = true;
 691        /* ARC subsystem not supported */
 692        mac->arc_subsystem_valid = false;
 693        /* Adaptive IFS supported */
 694        mac->adaptive_ifs = true;
 695
 696        /* LED and other operations */
 697        switch (mac->type) {
 698        case e1000_ich8lan:
 699        case e1000_ich9lan:
 700        case e1000_ich10lan:
 701                /* check management mode */
 702                mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
 703                /* ID LED init */
 704                mac->ops.id_led_init = e1000e_id_led_init_generic;
 705                /* blink LED */
 706                mac->ops.blink_led = e1000e_blink_led_generic;
 707                /* setup LED */
 708                mac->ops.setup_led = e1000e_setup_led_generic;
 709                /* cleanup LED */
 710                mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
 711                /* turn on/off LED */
 712                mac->ops.led_on = e1000_led_on_ich8lan;
 713                mac->ops.led_off = e1000_led_off_ich8lan;
 714                break;
 715        case e1000_pch2lan:
 716                mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
 717                mac->ops.rar_set = e1000_rar_set_pch2lan;
 718                /* fall-through */
 719        case e1000_pch_lpt:
 720        case e1000_pch_spt:
 721        case e1000_pch_cnp:
 722        case e1000_pchlan:
 723                /* check management mode */
 724                mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
 725                /* ID LED init */
 726                mac->ops.id_led_init = e1000_id_led_init_pchlan;
 727                /* setup LED */
 728                mac->ops.setup_led = e1000_setup_led_pchlan;
 729                /* cleanup LED */
 730                mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
 731                /* turn on/off LED */
 732                mac->ops.led_on = e1000_led_on_pchlan;
 733                mac->ops.led_off = e1000_led_off_pchlan;
 734                break;
 735        default:
 736                break;
 737        }
 738
 739        if (mac->type >= e1000_pch_lpt) {
 740                mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
 741                mac->ops.rar_set = e1000_rar_set_pch_lpt;
 742                mac->ops.setup_physical_interface =
 743                    e1000_setup_copper_link_pch_lpt;
 744                mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
 745        }
 746
 747        /* Enable PCS Lock-loss workaround for ICH8 */
 748        if (mac->type == e1000_ich8lan)
 749                e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 750
 751        return 0;
 752}
 753
 754/**
 755 *  __e1000_access_emi_reg_locked - Read/write EMI register
 756 *  @hw: pointer to the HW structure
  757 *  @address: EMI address to program
 758 *  @data: pointer to value to read/write from/to the EMI address
 759 *  @read: boolean flag to indicate read or write
 760 *
 761 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 762 **/
 763static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
 764                                         u16 *data, bool read)
 765{
 766        s32 ret_val;
 767
 768        ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
 769        if (ret_val)
 770                return ret_val;
 771
 772        if (read)
 773                ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
 774        else
 775                ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
 776
 777        return ret_val;
 778}
 779
 780/**
 781 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 782 *  @hw: pointer to the HW structure
 783 *  @addr: EMI address to program
 784 *  @data: value to be read from the EMI address
 785 *
 786 *  Assumes the SW/FW/HW Semaphore is already acquired.
 787 **/
 788s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
 789{
 790        return __e1000_access_emi_reg_locked(hw, addr, data, true);
 791}
 792
 793/**
 794 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 795 *  @hw: pointer to the HW structure
 796 *  @addr: EMI address to program
 797 *  @data: value to be written to the EMI address
 798 *
 799 *  Assumes the SW/FW/HW Semaphore is already acquired.
 800 **/
 801s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
 802{
 803        return __e1000_access_emi_reg_locked(hw, addr, &data, false);
 804}
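
/* Illustrative sketch only, not called anywhere in the driver: the EMI
 * helpers above assume the SW/FW/HW semaphore is already held, so a typical
 * caller brackets them with the PHY acquire/release ops, as
 * e1000_set_eee_pchlan() does below.  I82579_EEE_PCS_STATUS is assumed to be
 * defined in ich8lan.h.
 */
static inline s32 e1000_example_read_emi_pcs_status(struct e1000_hw *hw,
                                                    u16 *data)
{
        s32 ret_val;

        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val)
                return ret_val;

        /* Safe to use the _locked EMI accessor while the semaphore is held */
        ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_PCS_STATUS, data);

        hw->phy.ops.release(hw);

        return ret_val;
}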
 805
 806/**
 807 *  e1000_set_eee_pchlan - Enable/disable EEE support
 808 *  @hw: pointer to the HW structure
 809 *
 810 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 811 *  the link and the EEE capabilities of the link partner.  The LPI Control
 812 *  register bits will remain set only if/when link is up.
 813 *
 814 *  EEE LPI must not be asserted earlier than one second after link is up.
 815 *  On 82579, EEE LPI should not be enabled until such time otherwise there
 816 *  can be link issues with some switches.  Other devices can have EEE LPI
 817 *  enabled immediately upon link up since they have a timer in hardware which
 818 *  prevents LPI from being asserted too early.
 819 **/
 820s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 821{
 822        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 823        s32 ret_val;
 824        u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
 825
 826        switch (hw->phy.type) {
 827        case e1000_phy_82579:
 828                lpa = I82579_EEE_LP_ABILITY;
 829                pcs_status = I82579_EEE_PCS_STATUS;
 830                adv_addr = I82579_EEE_ADVERTISEMENT;
 831                break;
 832        case e1000_phy_i217:
 833                lpa = I217_EEE_LP_ABILITY;
 834                pcs_status = I217_EEE_PCS_STATUS;
 835                adv_addr = I217_EEE_ADVERTISEMENT;
 836                break;
 837        default:
 838                return 0;
 839        }
 840
 841        ret_val = hw->phy.ops.acquire(hw);
 842        if (ret_val)
 843                return ret_val;
 844
 845        ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
 846        if (ret_val)
 847                goto release;
 848
 849        /* Clear bits that enable EEE in various speeds */
 850        lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
 851
 852        /* Enable EEE if not disabled by user */
 853        if (!dev_spec->eee_disable) {
 854                /* Save off link partner's EEE ability */
 855                ret_val = e1000_read_emi_reg_locked(hw, lpa,
 856                                                    &dev_spec->eee_lp_ability);
 857                if (ret_val)
 858                        goto release;
 859
 860                /* Read EEE advertisement */
 861                ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
 862                if (ret_val)
 863                        goto release;
 864
 865                /* Enable EEE only for speeds in which the link partner is
 866                 * EEE capable and for which we advertise EEE.
 867                 */
 868                if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
 869                        lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
 870
 871                if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
 872                        e1e_rphy_locked(hw, MII_LPA, &data);
 873                        if (data & LPA_100FULL)
 874                                lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
 875                        else
 876                                /* EEE is not supported in 100Half, so ignore
 877                                 * partner's EEE in 100 ability if full-duplex
 878                                 * is not advertised.
 879                                 */
 880                                dev_spec->eee_lp_ability &=
 881                                    ~I82579_EEE_100_SUPPORTED;
 882                }
 883        }
 884
 885        if (hw->phy.type == e1000_phy_82579) {
 886                ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
 887                                                    &data);
 888                if (ret_val)
 889                        goto release;
 890
 891                data &= ~I82579_LPI_100_PLL_SHUT;
 892                ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
 893                                                     data);
 894        }
 895
 896        /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
 897        ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
 898        if (ret_val)
 899                goto release;
 900
 901        ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
 902release:
 903        hw->phy.ops.release(hw);
 904
 905        return ret_val;
 906}
 907
 908/**
 909 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 910 *  @hw:   pointer to the HW structure
 911 *  @link: link up bool flag
 912 *
 913 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 914 *  preventing further DMA write requests.  Workaround the issue by disabling
  915 *  the de-assertion of the clock request when in 1Gbps mode.
 916 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 917 *  speeds in order to avoid Tx hangs.
 918 **/
 919static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
 920{
 921        u32 fextnvm6 = er32(FEXTNVM6);
 922        u32 status = er32(STATUS);
 923        s32 ret_val = 0;
 924        u16 reg;
 925
 926        if (link && (status & E1000_STATUS_SPEED_1000)) {
 927                ret_val = hw->phy.ops.acquire(hw);
 928                if (ret_val)
 929                        return ret_val;
 930
 931                ret_val =
 932                    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
 933                                                &reg);
 934                if (ret_val)
 935                        goto release;
 936
 937                ret_val =
 938                    e1000e_write_kmrn_reg_locked(hw,
 939                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
 940                                                 reg &
 941                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
 942                if (ret_val)
 943                        goto release;
 944
 945                usleep_range(10, 20);
 946
 947                ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
 948
 949                ret_val =
 950                    e1000e_write_kmrn_reg_locked(hw,
 951                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
 952                                                 reg);
 953release:
 954                hw->phy.ops.release(hw);
 955        } else {
 956                /* clear FEXTNVM6 bit 8 on link down or 10/100 */
 957                fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
 958
 959                if ((hw->phy.revision > 5) || !link ||
 960                    ((status & E1000_STATUS_SPEED_100) &&
 961                     (status & E1000_STATUS_FD)))
 962                        goto update_fextnvm6;
 963
 964                ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
 965                if (ret_val)
 966                        return ret_val;
 967
 968                /* Clear link status transmit timeout */
 969                reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
 970
 971                if (status & E1000_STATUS_SPEED_100) {
 972                        /* Set inband Tx timeout to 5x10us for 100Half */
 973                        reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
 974
 975                        /* Do not extend the K1 entry latency for 100Half */
 976                        fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
 977                } else {
 978                        /* Set inband Tx timeout to 50x10us for 10Full/Half */
 979                        reg |= 50 <<
 980                            I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
 981
 982                        /* Extend the K1 entry latency for 10 Mbps */
 983                        fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
 984                }
 985
 986                ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
 987                if (ret_val)
 988                        return ret_val;
 989
 990update_fextnvm6:
 991                ew32(FEXTNVM6, fextnvm6);
 992        }
 993
 994        return ret_val;
 995}
 996
 997/**
 998 *  e1000_platform_pm_pch_lpt - Set platform power management values
 999 *  @hw: pointer to the HW structure
1000 *  @link: bool indicating link status
1001 *
1002 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1003 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1004 *  when link is up (which must not exceed the maximum latency supported
1005 *  by the platform), otherwise specify there is no LTR requirement.
1006 *  Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
1007 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
1008 *  Capability register set, on this device LTR is set by writing the
1009 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1010 *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
1011 *  message to the PMC.
1012 **/
1013static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1014{
1015        u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1016            link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1017        u16 lat_enc = 0;        /* latency encoded */
1018
1019        if (link) {
1020                u16 speed, duplex, scale = 0;
1021                u16 max_snoop, max_nosnoop;
1022                u16 max_ltr_enc;        /* max LTR latency encoded */
1023                u64 value;
1024                u32 rxa;
1025
1026                if (!hw->adapter->max_frame_size) {
1027                        e_dbg("max_frame_size not set.\n");
1028                        return -E1000_ERR_CONFIG;
1029                }
1030
1031                hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1032                if (!speed) {
1033                        e_dbg("Speed not set.\n");
1034                        return -E1000_ERR_CONFIG;
1035                }
1036
1037                /* Rx Packet Buffer Allocation size (KB) */
1038                rxa = er32(PBA) & E1000_PBA_RXA_MASK;
1039
1040                /* Determine the maximum latency tolerated by the device.
1041                 *
1042                 * Per the PCIe spec, the tolerated latencies are encoded as
1043                 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1044                 * a 10-bit value (0-1023) to provide a range from 1 ns to
1045                 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
1046                 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1047                 */
1048                rxa *= 512;
1049                value = (rxa > hw->adapter->max_frame_size) ?
1050                        (rxa - hw->adapter->max_frame_size) * (16000 / speed) :
1051                        0;
1052
1053                while (value > PCI_LTR_VALUE_MASK) {
1054                        scale++;
1055                        value = DIV_ROUND_UP(value, BIT(5));
1056                }
1057                if (scale > E1000_LTRV_SCALE_MAX) {
1058                        e_dbg("Invalid LTR latency scale %d\n", scale);
1059                        return -E1000_ERR_CONFIG;
1060                }
1061                lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
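                /* Worked example (illustrative): a tolerated latency of
                 * 40000 ns is larger than PCI_LTR_VALUE_MASK (1023), so the
                 * loop above rescales it twice: 40000 -> 1250 -> 40, leaving
                 * scale = 2 (units of 2^10 ns).  lat_enc then represents
                 * 40 * 1024 = 40960 ns, the encoded value reported to the
                 * platform.
                 */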
1062
1063                /* Determine the maximum latency tolerated by the platform */
1064                pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
1065                                     &max_snoop);
1066                pci_read_config_word(hw->adapter->pdev,
1067                                     E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1068                max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
1069
1070                if (lat_enc > max_ltr_enc)
1071                        lat_enc = max_ltr_enc;
1072        }
1073
1074        /* Set Snoop and No-Snoop latencies the same */
1075        reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1076        ew32(LTRV, reg);
1077
1078        return 0;
1079}
1080
1081/**
1082 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1083 *  @hw: pointer to the HW structure
1084 *  @to_sx: boolean indicating a system power state transition to Sx
1085 *
1086 *  When link is down, configure ULP mode to significantly reduce the power
1087 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1088 *  ME firmware to start the ULP configuration.  If not on an ME enabled
1089 *  system, configure the ULP mode by software.
1090 */
1091s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1092{
1093        u32 mac_reg;
1094        s32 ret_val = 0;
1095        u16 phy_reg;
1096        u16 oem_reg = 0;
1097
1098        if ((hw->mac.type < e1000_pch_lpt) ||
1099            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1100            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1101            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1102            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1103            (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1104                return 0;
1105
1106        if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1107                /* Request ME configure ULP mode in the PHY */
1108                mac_reg = er32(H2ME);
1109                mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1110                ew32(H2ME, mac_reg);
1111
1112                goto out;
1113        }
1114
1115        if (!to_sx) {
1116                int i = 0;
1117
1118                /* Poll up to 5 seconds for Cable Disconnected indication */
1119                while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1120                        /* Bail if link is re-acquired */
1121                        if (er32(STATUS) & E1000_STATUS_LU)
1122                                return -E1000_ERR_PHY;
1123
1124                        if (i++ == 100)
1125                                break;
1126
1127                        msleep(50);
1128                }
1129                e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
1130                      (er32(FEXT) &
1131                       E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
1132        }
1133
1134        ret_val = hw->phy.ops.acquire(hw);
1135        if (ret_val)
1136                goto out;
1137
1138        /* Force SMBus mode in PHY */
1139        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1140        if (ret_val)
1141                goto release;
1142        phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1143        e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1144
1145        /* Force SMBus mode in MAC */
1146        mac_reg = er32(CTRL_EXT);
1147        mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1148        ew32(CTRL_EXT, mac_reg);
1149
 1150        /* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
1151         * LPLU and disable Gig speed when entering ULP
1152         */
1153        if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1154                ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1155                                                       &oem_reg);
1156                if (ret_val)
1157                        goto release;
1158
1159                phy_reg = oem_reg;
1160                phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1161
1162                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1163                                                        phy_reg);
1164
1165                if (ret_val)
1166                        goto release;
1167        }
1168
1169        /* Set Inband ULP Exit, Reset to SMBus mode and
1170         * Disable SMBus Release on PERST# in PHY
1171         */
1172        ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1173        if (ret_val)
1174                goto release;
1175        phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1176                    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1177        if (to_sx) {
1178                if (er32(WUFC) & E1000_WUFC_LNKC)
1179                        phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1180                else
1181                        phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1182
1183                phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1184                phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1185        } else {
1186                phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1187                phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1188                phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1189        }
1190        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1191
1192        /* Set Disable SMBus Release on PERST# in MAC */
1193        mac_reg = er32(FEXTNVM7);
1194        mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1195        ew32(FEXTNVM7, mac_reg);
1196
1197        /* Commit ULP changes in PHY by starting auto ULP configuration */
1198        phy_reg |= I218_ULP_CONFIG1_START;
1199        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1200
1201        if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1202            to_sx && (er32(STATUS) & E1000_STATUS_LU)) {
1203                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1204                                                        oem_reg);
1205                if (ret_val)
1206                        goto release;
1207        }
1208
1209release:
1210        hw->phy.ops.release(hw);
1211out:
1212        if (ret_val)
1213                e_dbg("Error in ULP enable flow: %d\n", ret_val);
1214        else
1215                hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1216
1217        return ret_val;
1218}
1219
1220/**
1221 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1222 *  @hw: pointer to the HW structure
1223 *  @force: boolean indicating whether or not to force disabling ULP
1224 *
1225 *  Un-configure ULP mode when link is up, the system is transitioned from
1226 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1227 *  system, poll for an indication from ME that ULP has been un-configured.
1228 *  If not on an ME enabled system, un-configure the ULP mode by software.
1229 *
1230 *  During nominal operation, this function is called when link is acquired
1231 *  to disable ULP mode (force=false); otherwise, for example when unloading
1232 *  the driver or during Sx->S0 transitions, this is called with force=true
1233 *  to forcibly disable ULP.
1234 */
1235static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1236{
1237        s32 ret_val = 0;
1238        u32 mac_reg;
1239        u16 phy_reg;
1240        int i = 0;
1241
1242        if ((hw->mac.type < e1000_pch_lpt) ||
1243            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1244            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1245            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1246            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1247            (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1248                return 0;
1249
1250        if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1251                if (force) {
1252                        /* Request ME un-configure ULP mode in the PHY */
1253                        mac_reg = er32(H2ME);
1254                        mac_reg &= ~E1000_H2ME_ULP;
1255                        mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1256                        ew32(H2ME, mac_reg);
1257                }
1258
1259                /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1260                while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
1261                        if (i++ == 30) {
1262                                ret_val = -E1000_ERR_PHY;
1263                                goto out;
1264                        }
1265
1266                        usleep_range(10000, 20000);
1267                }
1268                e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1269
1270                if (force) {
1271                        mac_reg = er32(H2ME);
1272                        mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1273                        ew32(H2ME, mac_reg);
1274                } else {
1275                        /* Clear H2ME.ULP after ME ULP configuration */
1276                        mac_reg = er32(H2ME);
1277                        mac_reg &= ~E1000_H2ME_ULP;
1278                        ew32(H2ME, mac_reg);
1279                }
1280
1281                goto out;
1282        }
1283
1284        ret_val = hw->phy.ops.acquire(hw);
1285        if (ret_val)
1286                goto out;
1287
1288        if (force)
1289                /* Toggle LANPHYPC Value bit */
1290                e1000_toggle_lanphypc_pch_lpt(hw);
1291
1292        /* Unforce SMBus mode in PHY */
1293        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1294        if (ret_val) {
1295                /* The MAC might be in PCIe mode, so temporarily force to
1296                 * SMBus mode in order to access the PHY.
1297                 */
1298                mac_reg = er32(CTRL_EXT);
1299                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1300                ew32(CTRL_EXT, mac_reg);
1301
1302                msleep(50);
1303
1304                ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1305                                                       &phy_reg);
1306                if (ret_val)
1307                        goto release;
1308        }
1309        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1310        e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1311
1312        /* Unforce SMBus mode in MAC */
1313        mac_reg = er32(CTRL_EXT);
1314        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1315        ew32(CTRL_EXT, mac_reg);
1316
1317        /* When ULP mode was previously entered, K1 was disabled by the
1318         * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1319         */
1320        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1321        if (ret_val)
1322                goto release;
1323        phy_reg |= HV_PM_CTRL_K1_ENABLE;
1324        e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1325
1326        /* Clear ULP enabled configuration */
1327        ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1328        if (ret_val)
1329                goto release;
1330        phy_reg &= ~(I218_ULP_CONFIG1_IND |
1331                     I218_ULP_CONFIG1_STICKY_ULP |
1332                     I218_ULP_CONFIG1_RESET_TO_SMBUS |
1333                     I218_ULP_CONFIG1_WOL_HOST |
1334                     I218_ULP_CONFIG1_INBAND_EXIT |
1335                     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1336                     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1337                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1338        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1339
1340        /* Commit ULP changes by starting auto ULP configuration */
1341        phy_reg |= I218_ULP_CONFIG1_START;
1342        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1343
1344        /* Clear Disable SMBus Release on PERST# in MAC */
1345        mac_reg = er32(FEXTNVM7);
1346        mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1347        ew32(FEXTNVM7, mac_reg);
1348
1349release:
1350        hw->phy.ops.release(hw);
1351        if (force) {
1352                e1000_phy_hw_reset(hw);
1353                msleep(50);
1354        }
1355out:
1356        if (ret_val)
1357                e_dbg("Error in ULP disable flow: %d\n", ret_val);
1358        else
1359                hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1360
1361        return ret_val;
1362}
1363
1364/**
1365 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1366 *  @hw: pointer to the HW structure
1367 *
1368 *  Checks to see if the link status of the hardware has changed.  If a
1369 *  change in link status has been detected, then we read the PHY registers
1370 *  to get the current speed/duplex if link exists.
1371 **/
1372static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1373{
1374        struct e1000_mac_info *mac = &hw->mac;
1375        s32 ret_val, tipg_reg = 0;
1376        u16 emi_addr, emi_val = 0;
1377        bool link;
1378        u16 phy_reg;
1379
1380        /* We only want to go out to the PHY registers to see if Auto-Neg
1381         * has completed and/or if our link status has changed.  The
1382         * get_link_status flag is set upon receiving a Link Status
1383         * Change or Rx Sequence Error interrupt.
1384         */
1385        if (!mac->get_link_status)
1386                return 0;
1387        mac->get_link_status = false;
1388
1389        /* First we want to see if the MII Status Register reports
1390         * link.  If so, then we want to get the current speed/duplex
1391         * of the PHY.
1392         */
1393        ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
1394        if (ret_val)
1395                goto out;
1396
1397        if (hw->mac.type == e1000_pchlan) {
1398                ret_val = e1000_k1_gig_workaround_hv(hw, link);
1399                if (ret_val)
1400                        goto out;
1401        }
1402
1403        /* When connected at 10Mbps half-duplex, some parts are excessively
1404         * aggressive resulting in many collisions. To avoid this, increase
1405         * the IPG and reduce Rx latency in the PHY.
1406         */
1407        if ((hw->mac.type >= e1000_pch2lan) && link) {
1408                u16 speed, duplex;
1409
1410                e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex);
1411                tipg_reg = er32(TIPG);
1412                tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1413
1414                if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1415                        tipg_reg |= 0xFF;
1416                        /* Reduce Rx latency in analog PHY */
1417                        emi_val = 0;
1418                } else if (hw->mac.type >= e1000_pch_spt &&
1419                           duplex == FULL_DUPLEX && speed != SPEED_1000) {
1420                        tipg_reg |= 0xC;
1421                        emi_val = 1;
1422                } else {
1423
1424                        /* Roll back the default values */
1425                        tipg_reg |= 0x08;
1426                        emi_val = 1;
1427                }
1428
1429                ew32(TIPG, tipg_reg);
1430
1431                ret_val = hw->phy.ops.acquire(hw);
1432                if (ret_val)
1433                        goto out;
1434
1435                if (hw->mac.type == e1000_pch2lan)
1436                        emi_addr = I82579_RX_CONFIG;
1437                else
1438                        emi_addr = I217_RX_CONFIG;
1439                ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1440
1441                if (hw->mac.type >= e1000_pch_lpt) {
1442                        u16 phy_reg;
1443
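                            /* Pick the PLL clock gate value based on the negotiated speed */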
1444                        e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg);
1445                        phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1446                        if (speed == SPEED_100 || speed == SPEED_10)
1447                                phy_reg |= 0x3E8;
1448                        else
1449                                phy_reg |= 0xFA;
1450                        e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
1451                }
1452                hw->phy.ops.release(hw);
1453
1454                if (ret_val)
1455                        goto out;
1456
1457                if (hw->mac.type >= e1000_pch_spt) {
1458                        u16 data;
1459                        u16 ptr_gap;
1460
1461                        if (speed == SPEED_1000) {
1462                                ret_val = hw->phy.ops.acquire(hw);
1463                                if (ret_val)
1464                                        goto out;
1465
1466                                ret_val = e1e_rphy_locked(hw,
1467                                                          PHY_REG(776, 20),
1468                                                          &data);
1469                                if (ret_val) {
1470                                        hw->phy.ops.release(hw);
1471                                        goto out;
1472                                }
1473
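                                    /* Pointer gap is bits 11:2; raise it to at least 0x18 */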
1474                                ptr_gap = (data & (0x3FF << 2)) >> 2;
1475                                if (ptr_gap < 0x18) {
1476                                        data &= ~(0x3FF << 2);
1477                                        data |= (0x18 << 2);
1478                                        ret_val =
1479                                            e1e_wphy_locked(hw,
1480                                                            PHY_REG(776, 20),
1481                                                            data);
1482                                }
1483                                hw->phy.ops.release(hw);
1484                                if (ret_val)
1485                                        goto out;
1486                        } else {
1487                                ret_val = hw->phy.ops.acquire(hw);
1488                                if (ret_val)
1489                                        goto out;
1490
1491                                ret_val = e1e_wphy_locked(hw,
1492                                                          PHY_REG(776, 20),
1493                                                          0xC023);
1494                                hw->phy.ops.release(hw);
1495                                if (ret_val)
1496                                        goto out;
1497
1498                        }
1499                }
1500        }
1501
1502        /* I217 Packet Loss issue:
1503         * ensure that FEXTNVM4 Beacon Duration is set correctly
1504         * on power up.
1505         * Set the Beacon Duration for I217 to 8 usec
1506         */
1507        if (hw->mac.type >= e1000_pch_lpt) {
1508                u32 mac_reg;
1509
1510                mac_reg = er32(FEXTNVM4);
1511                mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1512                mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1513                ew32(FEXTNVM4, mac_reg);
1514        }
1515
1516        /* Work-around I218 hang issue */
1517        if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1518            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1519            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
1520            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
1521                ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1522                if (ret_val)
1523                        goto out;
1524        }
1525        if (hw->mac.type >= e1000_pch_lpt) {
1526                /* Set platform power management values for
1527                 * Latency Tolerance Reporting (LTR)
1528                 */
1529                ret_val = e1000_platform_pm_pch_lpt(hw, link);
1530                if (ret_val)
1531                        goto out;
1532        }
1533
1534        /* Clear link partner's EEE ability */
1535        hw->dev_spec.ich8lan.eee_lp_ability = 0;
1536
1537        if (hw->mac.type >= e1000_pch_lpt) {
1538                u32 fextnvm6 = er32(FEXTNVM6);
1539
1540                if (hw->mac.type == e1000_pch_spt) {
1541                        /* FEXTNVM6 K1-off workaround - for SPT only */
1542                        u32 pcieanacfg = er32(PCIEANACFG);
1543
1544                        if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1545                                fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1546                        else
1547                                fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1548                }
1549
1550                ew32(FEXTNVM6, fextnvm6);
1551        }
1552
1553        if (!link)
1554                goto out;
1555
1556        switch (hw->mac.type) {
1557        case e1000_pch2lan:
1558                ret_val = e1000_k1_workaround_lv(hw);
1559                if (ret_val)
1560                        return ret_val;
1561                /* fall-thru */
1562        case e1000_pchlan:
1563                if (hw->phy.type == e1000_phy_82578) {
1564                        ret_val = e1000_link_stall_workaround_hv(hw);
1565                        if (ret_val)
1566                                return ret_val;
1567                }
1568
1569                /* Workaround for PCHx parts in half-duplex:
1570                 * Set the number of preambles removed from the packet
1571                 * when it is passed from the PHY to the MAC to prevent
1572                 * the MAC from misinterpreting the packet type.
1573                 */
1574                e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1575                phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1576
1577                if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
1578                        phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1579
1580                e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1581                break;
1582        default:
1583                break;
1584        }
1585
1586        /* Check if there was DownShift; this must be checked
1587         * immediately after link-up
1588         */
1589        e1000e_check_downshift(hw);
1590
1591        /* Enable/Disable EEE after link up */
1592        if (hw->phy.type > e1000_phy_82579) {
1593                ret_val = e1000_set_eee_pchlan(hw);
1594                if (ret_val)
1595                        return ret_val;
1596        }
1597
1598        /* If we are forcing speed/duplex, then we simply return since
1599         * we have already determined whether we have link or not.
1600         */
1601        if (!mac->autoneg)
1602                return -E1000_ERR_CONFIG;
1603
1604        /* Auto-Neg is enabled.  Auto Speed Detection takes care
1605         * of MAC speed/duplex configuration.  So we only need to
1606         * configure Collision Distance in the MAC.
1607         */
1608        mac->ops.config_collision_dist(hw);
1609
1610        /* Configure Flow Control now that Auto-Neg has completed.
1611         * First, we need to restore the desired flow control
1612         * settings because we may have had to re-autoneg with a
1613         * different link partner.
1614         */
1615        ret_val = e1000e_config_fc_after_link_up(hw);
1616        if (ret_val)
1617                e_dbg("Error configuring flow control\n");
1618
1619        return ret_val;
1620
1621out:
1622        mac->get_link_status = true;
1623        return ret_val;
1624}
1625
1626static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
1627{
1628        struct e1000_hw *hw = &adapter->hw;
1629        s32 rc;
1630
1631        rc = e1000_init_mac_params_ich8lan(hw);
1632        if (rc)
1633                return rc;
1634
1635        rc = e1000_init_nvm_params_ich8lan(hw);
1636        if (rc)
1637                return rc;
1638
1639        switch (hw->mac.type) {
1640        case e1000_ich8lan:
1641        case e1000_ich9lan:
1642        case e1000_ich10lan:
1643                rc = e1000_init_phy_params_ich8lan(hw);
1644                break;
1645        case e1000_pchlan:
1646        case e1000_pch2lan:
1647        case e1000_pch_lpt:
1648        case e1000_pch_spt:
1649        case e1000_pch_cnp:
1650                rc = e1000_init_phy_params_pchlan(hw);
1651                break;
1652        default:
1653                break;
1654        }
1655        if (rc)
1656                return rc;
1657
1658        /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
1659         * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
1660         */
1661        if ((adapter->hw.phy.type == e1000_phy_ife) ||
1662            ((adapter->hw.mac.type >= e1000_pch2lan) &&
1663             (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
1664                adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
1665                adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
1666
1667                hw->mac.ops.blink_led = NULL;
1668        }
1669
1670        if ((adapter->hw.mac.type == e1000_ich8lan) &&
1671            (adapter->hw.phy.type != e1000_phy_ife))
1672                adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
1673
1674        /* Enable workaround for 82579 w/ ME enabled */
1675        if ((adapter->hw.mac.type == e1000_pch2lan) &&
1676            (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1677                adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
1678
1679        return 0;
1680}
1681
1682static DEFINE_MUTEX(nvm_mutex);
1683
1684/**
1685 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1686 *  @hw: pointer to the HW structure
1687 *
1688 *  Acquires the mutex for performing NVM operations.
1689 **/
1690static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1691{
1692        mutex_lock(&nvm_mutex);
1693
1694        return 0;
1695}
1696
1697/**
1698 *  e1000_release_nvm_ich8lan - Release NVM mutex
1699 *  @hw: pointer to the HW structure
1700 *
1701 *  Releases the mutex used while performing NVM operations.
1702 **/
1703static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1704{
1705        mutex_unlock(&nvm_mutex);
1706}
1707
1708/**
1709 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1710 *  @hw: pointer to the HW structure
1711 *
1712 *  Acquires the software control flag for performing PHY and select
1713 *  MAC CSR accesses.
1714 **/
1715static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1716{
1717        u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1718        s32 ret_val = 0;
1719
1720        if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
1721                             &hw->adapter->state)) {
1722                e_dbg("contention for Phy access\n");
1723                return -E1000_ERR_PHY;
1724        }
1725
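            /* Wait for any current owner to release the SW flag */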
1726        while (timeout) {
1727                extcnf_ctrl = er32(EXTCNF_CTRL);
1728                if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1729                        break;
1730
1731                mdelay(1);
1732                timeout--;
1733        }
1734
1735        if (!timeout) {
1736                e_dbg("SW has already locked the resource.\n");
1737                ret_val = -E1000_ERR_CONFIG;
1738                goto out;
1739        }
1740
1741        timeout = SW_FLAG_TIMEOUT;
1742
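            /* Set the SW flag and wait for the hardware to latch it */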
1743        extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1744        ew32(EXTCNF_CTRL, extcnf_ctrl);
1745
1746        while (timeout) {
1747                extcnf_ctrl = er32(EXTCNF_CTRL);
1748                if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1749                        break;
1750
1751                mdelay(1);
1752                timeout--;
1753        }
1754
1755        if (!timeout) {
1756                e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1757                      er32(FWSM), extcnf_ctrl);
1758                extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1759                ew32(EXTCNF_CTRL, extcnf_ctrl);
1760                ret_val = -E1000_ERR_CONFIG;
1761                goto out;
1762        }
1763
1764out:
1765        if (ret_val)
1766                clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1767
1768        return ret_val;
1769}
1770
1771/**
1772 *  e1000_release_swflag_ich8lan - Release software control flag
1773 *  @hw: pointer to the HW structure
1774 *
1775 *  Releases the software control flag for performing PHY and select
1776 *  MAC CSR accesses.
1777 **/
1778static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1779{
1780        u32 extcnf_ctrl;
1781
1782        extcnf_ctrl = er32(EXTCNF_CTRL);
1783
1784        if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1785                extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1786                ew32(EXTCNF_CTRL, extcnf_ctrl);
1787        } else {
1788                e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
1789        }
1790
1791        clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1792}
1793
1794/**
1795 *  e1000_check_mng_mode_ich8lan - Checks management mode
1796 *  @hw: pointer to the HW structure
1797 *
1798 *  This checks if the adapter has any manageability enabled.
1799 *  This is a function pointer entry point only called by read/write
1800 *  routines for the PHY and NVM parts.
1801 **/
1802static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1803{
1804        u32 fwsm;
1805
1806        fwsm = er32(FWSM);
1807        return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1808                ((fwsm & E1000_FWSM_MODE_MASK) ==
1809                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1810}
1811
1812/**
1813 *  e1000_check_mng_mode_pchlan - Checks management mode
1814 *  @hw: pointer to the HW structure
1815 *
1816 *  This checks if the adapter has iAMT enabled.
1817 *  This is a function pointer entry point only called by read/write
1818 *  routines for the PHY and NVM parts.
1819 **/
1820static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1821{
1822        u32 fwsm;
1823
1824        fwsm = er32(FWSM);
1825        return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1826            (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1827}
1828
1829/**
1830 *  e1000_rar_set_pch2lan - Set receive address register
1831 *  @hw: pointer to the HW structure
1832 *  @addr: pointer to the receive address
1833 *  @index: receive address array register
1834 *
1835 *  Sets the receive address array register at index to the address passed
1836 *  in by addr.  For 82579, RAR[0] is the base address register that is to
1837 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1838 *  Use SHRA[0-3] in place of those reserved for ME.
1839 **/
1840static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1841{
1842        u32 rar_low, rar_high;
1843
1844        /* HW expects these in little endian so we reverse the byte order
1845         * from network order (big endian) to little endian
1846         */
1847        rar_low = ((u32)addr[0] |
1848                   ((u32)addr[1] << 8) |
1849                   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1850
1851        rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1852
1853        /* If MAC address zero, no need to set the AV bit */
1854        if (rar_low || rar_high)
1855                rar_high |= E1000_RAH_AV;
1856
1857        if (index == 0) {
1858                ew32(RAL(index), rar_low);
1859                e1e_flush();
1860                ew32(RAH(index), rar_high);
1861                e1e_flush();
1862                return 0;
1863        }
1864
1865        /* RAR[1-6] are owned by manageability.  Skip those and program the
1866         * next address into the SHRA register array.
1867         */
1868        if (index < (u32)(hw->mac.rar_entry_count)) {
1869                s32 ret_val;
1870
1871                ret_val = e1000_acquire_swflag_ich8lan(hw);
1872                if (ret_val)
1873                        goto out;
1874
1875                ew32(SHRAL(index - 1), rar_low);
1876                e1e_flush();
1877                ew32(SHRAH(index - 1), rar_high);
1878                e1e_flush();
1879
1880                e1000_release_swflag_ich8lan(hw);
1881
1882                /* verify the register updates */
1883                if ((er32(SHRAL(index - 1)) == rar_low) &&
1884                    (er32(SHRAH(index - 1)) == rar_high))
1885                        return 0;
1886
1887                e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1888                      (index - 1), er32(FWSM));
1889        }
1890
1891out:
1892        e_dbg("Failed to write receive address at index %d\n", index);
1893        return -E1000_ERR_CONFIG;
1894}
1895
1896/**
1897 *  e1000_rar_get_count_pch_lpt - Get the number of available SHRA
1898 *  @hw: pointer to the HW structure
1899 *
1900 *  Get the number of available receive registers that the Host can
1901 *  program. SHRA[0-10] are the shared receive address registers
1902 *  that are shared between the Host and manageability engine (ME).
1903 *  ME can reserve any number of addresses and the host needs to be
1904 *  able to tell how many available registers it has access to.
1905 **/
1906static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
1907{
1908        u32 wlock_mac;
1909        u32 num_entries;
1910
1911        wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1912        wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1913
1914        switch (wlock_mac) {
1915        case 0:
1916                /* All SHRA[0..10] and RAR[0] available */
1917                num_entries = hw->mac.rar_entry_count;
1918                break;
1919        case 1:
1920                /* Only RAR[0] available */
1921                num_entries = 1;
1922                break;
1923        default:
1924                /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
1925                num_entries = wlock_mac + 1;
1926                break;
1927        }
1928
1929        return num_entries;
1930}
1931
1932/**
1933 *  e1000_rar_set_pch_lpt - Set receive address registers
1934 *  @hw: pointer to the HW structure
1935 *  @addr: pointer to the receive address
1936 *  @index: receive address array register
1937 *
1938 *  Sets the receive address register array at index to the address passed
1939 *  in by addr. For LPT, RAR[0] is the base address register that is to
1940 *  contain the MAC address. SHRA[0-10] are the shared receive address
1941 *  registers that are shared between the Host and manageability engine (ME).
1942 **/
1943static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1944{
1945        u32 rar_low, rar_high;
1946        u32 wlock_mac;
1947
1948        /* HW expects these in little endian so we reverse the byte order
1949         * from network order (big endian) to little endian
1950         */
1951        rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
1952                   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1953
1954        rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1955
1956        /* If MAC address zero, no need to set the AV bit */
1957        if (rar_low || rar_high)
1958                rar_high |= E1000_RAH_AV;
1959
1960        if (index == 0) {
1961                ew32(RAL(index), rar_low);
1962                e1e_flush();
1963                ew32(RAH(index), rar_high);
1964                e1e_flush();
1965                return 0;
1966        }
1967
1968        /* The manageability engine (ME) can lock certain SHRAR registers that
1969         * it is using - those registers are unavailable for use.
1970         */
1971        if (index < hw->mac.rar_entry_count) {
1972                wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1973                wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1974
1975                /* Check if all SHRAR registers are locked */
1976                if (wlock_mac == 1)
1977                        goto out;
1978
1979                if ((wlock_mac == 0) || (index <= wlock_mac)) {
1980                        s32 ret_val;
1981
1982                        ret_val = e1000_acquire_swflag_ich8lan(hw);
1983
1984                        if (ret_val)
1985                                goto out;
1986
1987                        ew32(SHRAL_PCH_LPT(index - 1), rar_low);
1988                        e1e_flush();
1989                        ew32(SHRAH_PCH_LPT(index - 1), rar_high);
1990                        e1e_flush();
1991
1992                        e1000_release_swflag_ich8lan(hw);
1993
1994                        /* verify the register updates */
1995                        if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1996                            (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
1997                                return 0;
1998                }
1999        }
2000
2001out:
2002        e_dbg("Failed to write receive address at index %d\n", index);
2003        return -E1000_ERR_CONFIG;
2004}
2005
2006/**
2007 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2008 *  @hw: pointer to the HW structure
2009 *
2010 *  Checks if firmware is blocking the reset of the PHY.
2011 *  This is a function pointer entry point only called by
2012 *  reset routines.
2013 **/
2014static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2015{
2016        bool blocked = false;
2017        int i = 0;
2018
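            /* Poll RSPCIPHY for up to 30 * 10-20 msec before reporting the reset as blocked */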
2019        while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
2020               (i++ < 30))
2021                usleep_range(10000, 20000);
2022        return blocked ? E1000_BLK_PHY_RESET : 0;
2023}
2024
2025/**
2026 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2027 *  @hw: pointer to the HW structure
2028 *
2029 *  Assumes semaphore already acquired.
2030 *
2031 **/
2032static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2033{
2034        u16 phy_data;
2035        u32 strap = er32(STRAP);
2036        u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2037            E1000_STRAP_SMT_FREQ_SHIFT;
2038        s32 ret_val;
2039
2040        strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2041
2042        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2043        if (ret_val)
2044                return ret_val;
2045
2046        phy_data &= ~HV_SMB_ADDR_MASK;
2047        phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2048        phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2049
2050        if (hw->phy.type == e1000_phy_i217) {
2051                /* Restore SMBus frequency */
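                    /* A zero strap value is unsupported; otherwise the decremented value's low two bits select the frequency */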
2052                if (freq--) {
2053                        phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2054                        phy_data |= (freq & BIT(0)) <<
2055                            HV_SMB_ADDR_FREQ_LOW_SHIFT;
2056                        phy_data |= (freq & BIT(1)) <<
2057                            (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2058                } else {
2059                        e_dbg("Unsupported SMB frequency in PHY\n");
2060                }
2061        }
2062
2063        return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2064}
2065
2066/**
2067 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2068 *  @hw:   pointer to the HW structure
2069 *
2070 *  SW should configure the LCD from the NVM extended configuration region
2071 *  as a workaround for certain parts.
2072 **/
2073static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2074{
2075        struct e1000_phy_info *phy = &hw->phy;
2076        u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2077        s32 ret_val = 0;
2078        u16 word_addr, reg_data, reg_addr, phy_page = 0;
2079
2080        /* Initialize the PHY from the NVM on ICH platforms.  This
2081         * is needed due to an issue where the NVM configuration is
2082         * not properly autoloaded after power transitions.
2083         * Therefore, after each PHY reset, we will load the
2084         * configuration data out of the NVM manually.
2085         */
2086        switch (hw->mac.type) {
2087        case e1000_ich8lan:
2088                if (phy->type != e1000_phy_igp_3)
2089                        return ret_val;
2090
2091                if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
2092                    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
2093                        sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2094                        break;
2095                }
2096                /* Fall-thru */
2097        case e1000_pchlan:
2098        case e1000_pch2lan:
2099        case e1000_pch_lpt:
2100        case e1000_pch_spt:
2101        case e1000_pch_cnp:
2102                sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2103                break;
2104        default:
2105                return ret_val;
2106        }
2107
2108        ret_val = hw->phy.ops.acquire(hw);
2109        if (ret_val)
2110                return ret_val;
2111
2112        data = er32(FEXTNVM);
2113        if (!(data & sw_cfg_mask))
2114                goto release;
2115
2116        /* Make sure HW does not configure LCD from PHY
2117         * extended configuration before SW configuration
2118         */
2119        data = er32(EXTCNF_CTRL);
2120        if ((hw->mac.type < e1000_pch2lan) &&
2121            (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2122                goto release;
2123
2124        cnf_size = er32(EXTCNF_SIZE);
2125        cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2126        cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2127        if (!cnf_size)
2128                goto release;
2129
2130        cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2131        cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2132
2133        if (((hw->mac.type == e1000_pchlan) &&
2134             !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2135            (hw->mac.type > e1000_pchlan)) {
2136                /* HW configures the SMBus address and LEDs when the
2137                 * OEM and LCD Write Enable bits are set in the NVM.
2138                 * When both NVM bits are cleared, SW will configure
2139                 * them instead.
2140                 */
2141                ret_val = e1000_write_smbus_addr(hw);
2142                if (ret_val)
2143                        goto release;
2144
2145                data = er32(LEDCTL);
2146                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2147                                                        (u16)data);
2148                if (ret_val)
2149                        goto release;
2150        }
2151
2152        /* Configure LCD from extended configuration region. */
2153
2154        /* cnf_base_addr is in DWORD */
2155        word_addr = (u16)(cnf_base_addr << 1);
2156
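            /* Each extended configuration entry is a (data, address) word pair in NVM */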
2157        for (i = 0; i < cnf_size; i++) {
2158                ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
2159                if (ret_val)
2160                        goto release;
2161
2162                ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
2163                                         1, &reg_addr);
2164                if (ret_val)
2165                        goto release;
2166
2167                /* Save off the PHY page for future writes. */
2168                if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2169                        phy_page = reg_data;
2170                        continue;
2171                }
2172
2173                reg_addr &= PHY_REG_MASK;
2174                reg_addr |= phy_page;
2175
2176                ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
2177                if (ret_val)
2178                        goto release;
2179        }
2180
2181release:
2182        hw->phy.ops.release(hw);
2183        return ret_val;
2184}
2185
2186/**
2187 *  e1000_k1_gig_workaround_hv - K1 Si workaround
2188 *  @hw:   pointer to the HW structure
2189 *  @link: link up bool flag
2190 *
2191 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2192 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2193 *  If link is down, the function will restore the default K1 setting located
2194 *  in the NVM.
2195 **/
2196static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2197{
2198        s32 ret_val = 0;
2199        u16 status_reg = 0;
2200        bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2201
2202        if (hw->mac.type != e1000_pchlan)
2203                return 0;
2204
2205        /* Wrap the whole flow with the sw flag */
2206        ret_val = hw->phy.ops.acquire(hw);
2207        if (ret_val)
2208                return ret_val;
2209
2210        /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2211        if (link) {
2212                if (hw->phy.type == e1000_phy_82578) {
2213                        ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
2214                                                  &status_reg);
2215                        if (ret_val)
2216                                goto release;
2217
2218                        status_reg &= (BM_CS_STATUS_LINK_UP |
2219                                       BM_CS_STATUS_RESOLVED |
2220                                       BM_CS_STATUS_SPEED_MASK);
2221
2222                        if (status_reg == (BM_CS_STATUS_LINK_UP |
2223                                           BM_CS_STATUS_RESOLVED |
2224                                           BM_CS_STATUS_SPEED_1000))
2225                                k1_enable = false;
2226                }
2227
2228                if (hw->phy.type == e1000_phy_82577) {
2229                        ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
2230                        if (ret_val)
2231                                goto release;
2232
2233                        status_reg &= (HV_M_STATUS_LINK_UP |
2234                                       HV_M_STATUS_AUTONEG_COMPLETE |
2235                                       HV_M_STATUS_SPEED_MASK);
2236
2237                        if (status_reg == (HV_M_STATUS_LINK_UP |
2238                                           HV_M_STATUS_AUTONEG_COMPLETE |
2239                                           HV_M_STATUS_SPEED_1000))
2240                                k1_enable = false;
2241                }
2242
2243                /* Link stall fix for link up */
2244                ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
2245                if (ret_val)
2246                        goto release;
2247
2248        } else {
2249                /* Link stall fix for link down */
2250                ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
2251                if (ret_val)
2252                        goto release;
2253        }
2254
2255        ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2256
2257release:
2258        hw->phy.ops.release(hw);
2259
2260        return ret_val;
2261}
2262
2263/**
2264 *  e1000_configure_k1_ich8lan - Configure K1 power state
2265 *  @hw: pointer to the HW structure
2266 *  @k1_enable: K1 state to configure
2267 *
2268 *  Configure the K1 power state based on the provided parameter.
2269 *  Assumes semaphore already acquired.
2270 *
2271 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2272 **/
2273s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2274{
2275        s32 ret_val;
2276        u32 ctrl_reg = 0;
2277        u32 ctrl_ext = 0;
2278        u32 reg = 0;
2279        u16 kmrn_reg = 0;
2280
2281        ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2282                                              &kmrn_reg);
2283        if (ret_val)
2284                return ret_val;
2285
2286        if (k1_enable)
2287                kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2288        else
2289                kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2290
2291        ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2292                                               kmrn_reg);
2293        if (ret_val)
2294                return ret_val;
2295
2296        usleep_range(20, 40);
2297        ctrl_ext = er32(CTRL_EXT);
2298        ctrl_reg = er32(CTRL);
2299
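            /* Briefly force the MAC speed with speed bypass; the original CTRL/CTRL_EXT values are restored below */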
2300        reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2301        reg |= E1000_CTRL_FRCSPD;
2302        ew32(CTRL, reg);
2303
2304        ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2305        e1e_flush();
2306        usleep_range(20, 40);
2307        ew32(CTRL, ctrl_reg);
2308        ew32(CTRL_EXT, ctrl_ext);
2309        e1e_flush();
2310        usleep_range(20, 40);
2311
2312        return 0;
2313}
2314
2315/**
2316 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2317 *  @hw:       pointer to the HW structure
2318 *  @d0_state: boolean if entering d0 or d3 device state
2319 *
2320 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2321 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2322 *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2323 **/
2324static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2325{
2326        s32 ret_val = 0;
2327        u32 mac_reg;
2328        u16 oem_reg;
2329
2330        if (hw->mac.type < e1000_pchlan)
2331                return ret_val;
2332
2333        ret_val = hw->phy.ops.acquire(hw);
2334        if (ret_val)
2335                return ret_val;
2336
2337        if (hw->mac.type == e1000_pchlan) {
2338                mac_reg = er32(EXTCNF_CTRL);
2339                if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2340                        goto release;
2341        }
2342
2343        mac_reg = er32(FEXTNVM);
2344        if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2345                goto release;
2346
2347        mac_reg = er32(PHY_CTRL);
2348
2349        ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
2350        if (ret_val)
2351                goto release;
2352
2353        oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2354
2355        if (d0_state) {
2356                if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2357                        oem_reg |= HV_OEM_BITS_GBE_DIS;
2358
2359                if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2360                        oem_reg |= HV_OEM_BITS_LPLU;
2361        } else {
2362                if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2363                               E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2364                        oem_reg |= HV_OEM_BITS_GBE_DIS;
2365
2366                if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2367                               E1000_PHY_CTRL_NOND0A_LPLU))
2368                        oem_reg |= HV_OEM_BITS_LPLU;
2369        }
2370
2371        /* Set Restart auto-neg to activate the bits */
2372        if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2373            !hw->phy.ops.check_reset_block(hw))
2374                oem_reg |= HV_OEM_BITS_RESTART_AN;
2375
2376        ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
2377
2378release:
2379        hw->phy.ops.release(hw);
2380
2381        return ret_val;
2382}
2383
2384/**
2385 *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2386 *  @hw:   pointer to the HW structure
2387 **/
2388static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2389{
2390        s32 ret_val;
2391        u16 data;
2392
2393        ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
2394        if (ret_val)
2395                return ret_val;
2396
2397        data |= HV_KMRN_MDIO_SLOW;
2398
2399        ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
2400
2401        return ret_val;
2402}
2403
2404/**
2405 *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2406 *  done after every PHY reset.
 *  @hw: pointer to the HW structure
2407 **/
2408static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2409{
2410        s32 ret_val = 0;
2411        u16 phy_data;
2412
2413        if (hw->mac.type != e1000_pchlan)
2414                return 0;
2415
2416        /* Set MDIO slow mode before any other MDIO access */
2417        if (hw->phy.type == e1000_phy_82577) {
2418                ret_val = e1000_set_mdio_slow_mode_hv(hw);
2419                if (ret_val)
2420                        return ret_val;
2421        }
2422
2423        if (((hw->phy.type == e1000_phy_82577) &&
2424             ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2425            ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2426                /* Disable generation of early preamble */
2427                ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
2428                if (ret_val)
2429                        return ret_val;
2430
2431                /* Preamble tuning for SSC */
2432                ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
2433                if (ret_val)
2434                        return ret_val;
2435        }
2436
2437        if (hw->phy.type == e1000_phy_82578) {
2438                /* Return registers to default by doing a soft reset then
2439                 * writing 0x3140 to the control register.
2440                 */
2441                if (hw->phy.revision < 2) {
2442                        e1000e_phy_sw_reset(hw);
2443                        ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
2444                        if (ret_val)
2445                                return ret_val;
2446                }
2447        }
2448
2449        /* Select page 0 */
2450        ret_val = hw->phy.ops.acquire(hw);
2451        if (ret_val)
2452                return ret_val;
2453
2454        hw->phy.addr = 1;
2455        ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2456        hw->phy.ops.release(hw);
2457        if (ret_val)
2458                return ret_val;
2459
2460        /* Configure the K1 Si workaround during phy reset assuming there is
2461         * link so that it disables K1 if link is at 1Gbps.
2462         */
2463        ret_val = e1000_k1_gig_workaround_hv(hw, true);
2464        if (ret_val)
2465                return ret_val;
2466
2467        /* Workaround for link disconnects on a busy hub in half duplex */
2468        ret_val = hw->phy.ops.acquire(hw);
2469        if (ret_val)
2470                return ret_val;
2471        ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2472        if (ret_val)
2473                goto release;
2474        ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
2475        if (ret_val)
2476                goto release;
2477
2478        /* set MSE higher to enable link to stay up when noise is high */
2479        ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2480release:
2481        hw->phy.ops.release(hw);
2482
2483        return ret_val;
2484}
2485
2486/**
2487 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2488 *  @hw:   pointer to the HW structure
2489 **/
2490void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2491{
2492        u32 mac_reg;
2493        u16 i, phy_reg = 0;
2494        s32 ret_val;
2495
2496        ret_val = hw->phy.ops.acquire(hw);
2497        if (ret_val)
2498                return;
2499        ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2500        if (ret_val)
2501                goto release;
2502
2503        /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2504        for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2505                mac_reg = er32(RAL(i));
2506                hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2507                                           (u16)(mac_reg & 0xFFFF));
2508                hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2509                                           (u16)((mac_reg >> 16) & 0xFFFF));
2510
2511                mac_reg = er32(RAH(i));
2512                hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2513                                           (u16)(mac_reg & 0xFFFF));
2514                hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2515                                           (u16)((mac_reg & E1000_RAH_AV)
2516                                                 >> 16));
2517        }
2518
2519        e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2520
2521release:
2522        hw->phy.ops.release(hw);
2523}
2524
2525/**
2526 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2527 *  with 82579 PHY
2528 *  @hw: pointer to the HW structure
2529 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2530 **/
2531s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2532{
2533        s32 ret_val = 0;
2534        u16 phy_reg, data;
2535        u32 mac_reg;
2536        u16 i;
2537
2538        if (hw->mac.type < e1000_pch2lan)
2539                return 0;
2540
2541        /* disable Rx path while enabling/disabling workaround */
2542        e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
2543        ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14));
2544        if (ret_val)
2545                return ret_val;
2546
2547        if (enable) {
2548                /* Write Rx addresses (rar_entry_count for RAL/H, and
2549                 * SHRAL/H) and initial CRC values to the MAC
2550                 */
2551                for (i = 0; i < hw->mac.rar_entry_count; i++) {
2552                        u8 mac_addr[ETH_ALEN] = { 0 };
2553                        u32 addr_high, addr_low;
2554
2555                        addr_high = er32(RAH(i));
2556                        if (!(addr_high & E1000_RAH_AV))
2557                                continue;
2558                        addr_low = er32(RAL(i));
2559                        mac_addr[0] = (addr_low & 0xFF);
2560                        mac_addr[1] = ((addr_low >> 8) & 0xFF);
2561                        mac_addr[2] = ((addr_low >> 16) & 0xFF);
2562                        mac_addr[3] = ((addr_low >> 24) & 0xFF);
2563                        mac_addr[4] = (addr_high & 0xFF);
2564                        mac_addr[5] = ((addr_high >> 8) & 0xFF);
2565
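                            /* Program the inverted little-endian CRC of the address as its initial CRC value */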
2566                        ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
2567                }
2568
2569                /* Write Rx addresses to the PHY */
2570                e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2571
2572                /* Enable jumbo frame workaround in the MAC */
2573                mac_reg = er32(FFLT_DBG);
2574                mac_reg &= ~BIT(14);
2575                mac_reg |= (7 << 15);
2576                ew32(FFLT_DBG, mac_reg);
2577
2578                mac_reg = er32(RCTL);
2579                mac_reg |= E1000_RCTL_SECRC;
2580                ew32(RCTL, mac_reg);
2581
2582                ret_val = e1000e_read_kmrn_reg(hw,
2583                                               E1000_KMRNCTRLSTA_CTRL_OFFSET,
2584                                               &data);
2585                if (ret_val)
2586                        return ret_val;
2587                ret_val = e1000e_write_kmrn_reg(hw,
2588                                                E1000_KMRNCTRLSTA_CTRL_OFFSET,
2589                                                data | BIT(0));
2590                if (ret_val)
2591                        return ret_val;
2592                ret_val = e1000e_read_kmrn_reg(hw,
2593                                               E1000_KMRNCTRLSTA_HD_CTRL,
2594                                               &data);
2595                if (ret_val)
2596                        return ret_val;
2597                data &= ~(0xF << 8);
2598                data |= (0xB << 8);
2599                ret_val = e1000e_write_kmrn_reg(hw,
2600                                                E1000_KMRNCTRLSTA_HD_CTRL,
2601                                                data);
2602                if (ret_val)
2603                        return ret_val;
2604
2605                /* Enable jumbo frame workaround in the PHY */
2606                e1e_rphy(hw, PHY_REG(769, 23), &data);
2607                data &= ~(0x7F << 5);
2608                data |= (0x37 << 5);
2609                ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
2610                if (ret_val)
2611                        return ret_val;
2612                e1e_rphy(hw, PHY_REG(769, 16), &data);
2613                data &= ~BIT(13);
2614                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
2615                if (ret_val)
2616                        return ret_val;
2617                e1e_rphy(hw, PHY_REG(776, 20), &data);
2618                data &= ~(0x3FF << 2);
2619                data |= (E1000_TX_PTR_GAP << 2);
2620                ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
2621                if (ret_val)
2622                        return ret_val;
2623                ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
2624                if (ret_val)
2625                        return ret_val;
2626                e1e_rphy(hw, HV_PM_CTRL, &data);
2627                ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10));
2628                if (ret_val)
2629                        return ret_val;
2630        } else {
2631                /* Write MAC register values back to h/w defaults */
2632                mac_reg = er32(FFLT_DBG);
2633                mac_reg &= ~(0xF << 14);
2634                ew32(FFLT_DBG, mac_reg);
2635
2636                mac_reg = er32(RCTL);
2637                mac_reg &= ~E1000_RCTL_SECRC;
2638                ew32(RCTL, mac_reg);
2639
2640                ret_val = e1000e_read_kmrn_reg(hw,
2641                                               E1000_KMRNCTRLSTA_CTRL_OFFSET,
2642                                               &data);
2643                if (ret_val)
2644                        return ret_val;
2645                ret_val = e1000e_write_kmrn_reg(hw,
2646                                                E1000_KMRNCTRLSTA_CTRL_OFFSET,
2647                                                data & ~BIT(0));
2648                if (ret_val)
2649                        return ret_val;
2650                ret_val = e1000e_read_kmrn_reg(hw,
2651                                               E1000_KMRNCTRLSTA_HD_CTRL,
2652                                               &data);
2653                if (ret_val)
2654                        return ret_val;
2655                data &= ~(0xF << 8);
2656                data |= (0xB << 8);
2657                ret_val = e1000e_write_kmrn_reg(hw,
2658                                                E1000_KMRNCTRLSTA_HD_CTRL,
2659                                                data);
2660                if (ret_val)
2661                        return ret_val;
2662
2663                /* Write PHY register values back to h/w defaults */
2664                e1e_rphy(hw, PHY_REG(769, 23), &data);
2665                data &= ~(0x7F << 5);
2666                ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
2667                if (ret_val)
2668                        return ret_val;
2669                e1e_rphy(hw, PHY_REG(769, 16), &data);
2670                data |= BIT(13);
2671                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
2672                if (ret_val)
2673                        return ret_val;
2674                e1e_rphy(hw, PHY_REG(776, 20), &data);
2675                data &= ~(0x3FF << 2);
2676                data |= (0x8 << 2);
2677                ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
2678                if (ret_val)
2679                        return ret_val;
2680                ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
2681                if (ret_val)
2682                        return ret_val;
2683                e1e_rphy(hw, HV_PM_CTRL, &data);
2684                ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10));
2685                if (ret_val)
2686                        return ret_val;
2687        }
2688
2689        /* re-enable Rx path after enabling/disabling workaround */
2690        return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14));
2691}
2692
2693/**
2694 *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2695 *  done after every PHY reset.
 *  @hw: pointer to the HW structure
2696 **/
2697static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2698{
2699        s32 ret_val = 0;
2700
2701        if (hw->mac.type != e1000_pch2lan)
2702                return 0;
2703
2704        /* Set MDIO slow mode before any other MDIO access */
2705        ret_val = e1000_set_mdio_slow_mode_hv(hw);
2706        if (ret_val)
2707                return ret_val;
2708
2709        ret_val = hw->phy.ops.acquire(hw);
2710        if (ret_val)
2711                return ret_val;
2712        /* set MSE higher to enable link to stay up when noise is high */
2713        ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2714        if (ret_val)
2715                goto release;
2716        /* drop the link after the MSE threshold has been reached 5 times */
2717        ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2718release:
2719        hw->phy.ops.release(hw);
2720
2721        return ret_val;
2722}
2723
2724/**
2725 *  e1000_k1_workaround_lv - K1 Si workaround
2726 *  @hw:   pointer to the HW structure
2727 *
2728 *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
2729 *  Disable K1 at 1000Mbps and 100Mbps.
2730 **/
2731static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2732{
2733        s32 ret_val = 0;
2734        u16 status_reg = 0;
2735
2736        if (hw->mac.type != e1000_pch2lan)
2737                return 0;
2738
2739        /* Set K1 beacon duration based on 10Mbps speed */
2740        ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
2741        if (ret_val)
2742                return ret_val;
2743
2744        if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2745            == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2746                if (status_reg &
2747                    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2748                        u16 pm_phy_reg;
2749
2750                        /* LV 1G/100 packet drop issue workaround */
2751                        ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2752                        if (ret_val)
2753                                return ret_val;
2754                        pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2755                        ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2756                        if (ret_val)
2757                                return ret_val;
2758                } else {
2759                        u32 mac_reg;
2760
2761                        mac_reg = er32(FEXTNVM4);
2762                        mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2763                        mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2764                        ew32(FEXTNVM4, mac_reg);
2765                }
2766        }
2767
2768        return ret_val;
2769}
2770
2771/**
2772 *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2773 *  @hw:   pointer to the HW structure
2774 *  @gate: boolean set to true to gate, false to ungate
2775 *
2776 *  Gate/ungate the automatic PHY configuration via hardware; perform
2777 *  the configuration via software instead.
2778 **/
2779static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2780{
2781        u32 extcnf_ctrl;
2782
2783        if (hw->mac.type < e1000_pch2lan)
2784                return;
2785
2786        extcnf_ctrl = er32(EXTCNF_CTRL);
2787
2788        if (gate)
2789                extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2790        else
2791                extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2792
2793        ew32(EXTCNF_CTRL, extcnf_ctrl);
2794}
2795
2796/**
2797 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2798 *  @hw: pointer to the HW structure
2799 *
2800 *  Check the appropriate indication that the MAC has finished configuring the
2801 *  PHY after a software reset.
2802 **/
2803static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2804{
2805        u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2806
2807        /* Wait for basic configuration to complete before proceeding */
2808        do {
2809                data = er32(STATUS);
2810                data &= E1000_STATUS_LAN_INIT_DONE;
2811                usleep_range(100, 200);
2812        } while ((!data) && --loop);
2813
2814        /* If basic configuration is incomplete before the above loop
2815         * count reaches 0, loading the configuration from NVM will
2816         * leave the PHY in a bad state possibly resulting in no link.
2817         */
2818        if (loop == 0)
2819                e_dbg("LAN_INIT_DONE not set, increase timeout\n");
2820
2821        /* Clear the Init Done bit for the next init event */
2822        data = er32(STATUS);
2823        data &= ~E1000_STATUS_LAN_INIT_DONE;
2824        ew32(STATUS, data);
2825}
2826
2827/**
2828 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2829 *  @hw: pointer to the HW structure
2830 **/
2831static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2832{
2833        s32 ret_val = 0;
2834        u16 reg;
2835
2836        if (hw->phy.ops.check_reset_block(hw))
2837                return 0;
2838
2839        /* Allow time for h/w to get to quiescent state after reset */
2840        usleep_range(10000, 20000);
2841
2842        /* Perform any necessary post-reset workarounds */
2843        switch (hw->mac.type) {
2844        case e1000_pchlan:
2845                ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2846                if (ret_val)
2847                        return ret_val;
2848                break;
2849        case e1000_pch2lan:
2850                ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2851                if (ret_val)
2852                        return ret_val;
2853                break;
2854        default:
2855                break;
2856        }
2857
2858        /* Clear the host wakeup bit after LCD reset */
2859        if (hw->mac.type >= e1000_pchlan) {
2860                e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
2861                reg &= ~BM_WUC_HOST_WU_BIT;
2862                e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
2863        }
2864
2865        /* Configure the LCD with the extended configuration region in NVM */
2866        ret_val = e1000_sw_lcd_config_ich8lan(hw);
2867        if (ret_val)
2868                return ret_val;
2869
2870        /* Configure the LCD with the OEM bits in NVM */
2871        ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2872
2873        if (hw->mac.type == e1000_pch2lan) {
2874                /* Ungate automatic PHY configuration on non-managed 82579 */
2875                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
2876                        usleep_range(10000, 20000);
2877                        e1000_gate_hw_phy_config_ich8lan(hw, false);
2878                }
2879
2880                /* Set EEE LPI Update Timer to 200usec */
2881                ret_val = hw->phy.ops.acquire(hw);
2882                if (ret_val)
2883                        return ret_val;
2884                ret_val = e1000_write_emi_reg_locked(hw,
2885                                                     I82579_LPI_UPDATE_TIMER,
2886                                                     0x1387);
2887                hw->phy.ops.release(hw);
2888        }
2889
2890        return ret_val;
2891}
2892
2893/**
2894 *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2895 *  @hw: pointer to the HW structure
2896 *
2897 *  Resets the PHY
2898 *  This is a function pointer entry point called by drivers
2899 *  or other shared routines.
2900 **/
2901static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2902{
2903        s32 ret_val = 0;
2904
2905        /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2906        if ((hw->mac.type == e1000_pch2lan) &&
2907            !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
2908                e1000_gate_hw_phy_config_ich8lan(hw, true);
2909
2910        ret_val = e1000e_phy_hw_reset_generic(hw);
2911        if (ret_val)
2912                return ret_val;
2913
2914        return e1000_post_phy_reset_ich8lan(hw);
2915}
2916
2917/**
2918 *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2919 *  @hw: pointer to the HW structure
2920 *  @active: true to enable LPLU, false to disable
2921 *
2922 *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2923 *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
2924 *  the PHY speed. This function will manually set the LPLU bit and restart
2925 *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2926 *  since it configures the same bit.
2927 **/
2928static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2929{
2930        s32 ret_val;
2931        u16 oem_reg;
2932
2933        ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
2934        if (ret_val)
2935                return ret_val;
2936
2937        if (active)
2938                oem_reg |= HV_OEM_BITS_LPLU;
2939        else
2940                oem_reg &= ~HV_OEM_BITS_LPLU;
2941
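            /* Restart auto-negotiation so the new LPLU setting takes
             * effect, unless a PHY reset is currently blocked.
             */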
2942        if (!hw->phy.ops.check_reset_block(hw))
2943                oem_reg |= HV_OEM_BITS_RESTART_AN;
2944
2945        return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
2946}
2947
2948/**
2949 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2950 *  @hw: pointer to the HW structure
2951 *  @active: true to enable LPLU, false to disable
2952 *
2953 *  Sets the LPLU D0 state according to the active flag.  When
2954 *  activating LPLU this function also disables smart speed
2955 *  and vice versa.  LPLU will not be activated unless the
2956 *  device's autonegotiation advertisement includes 10, 10/100,
2957 *  or 10/100/1000 Mb/s at all duplexes.
2958 *  This is a function pointer entry point only called by
2959 *  PHY setup routines.
2960 **/
2961static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2962{
2963        struct e1000_phy_info *phy = &hw->phy;
2964        u32 phy_ctrl;
2965        s32 ret_val = 0;
2966        u16 data;
2967
2968        if (phy->type == e1000_phy_ife)
2969                return 0;
2970
2971        phy_ctrl = er32(PHY_CTRL);
2972
2973        if (active) {
2974                phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2975                ew32(PHY_CTRL, phy_ctrl);
2976
2977                if (phy->type != e1000_phy_igp_3)
2978                        return 0;
2979
2980                /* Call gig speed drop workaround on LPLU before accessing
2981                 * any PHY registers
2982                 */
2983                if (hw->mac.type == e1000_ich8lan)
2984                        e1000e_gig_downshift_workaround_ich8lan(hw);
2985
2986                /* When LPLU is enabled, we should disable SmartSpeed */
2987                ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
2988                if (ret_val)
2989                        return ret_val;
2990                data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2991                ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2992                if (ret_val)
2993                        return ret_val;
2994        } else {
2995                phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2996                ew32(PHY_CTRL, phy_ctrl);
2997
2998                if (phy->type != e1000_phy_igp_3)
2999                        return 0;
3000
3001                /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3002                 * during Dx states where the power conservation is most
3003                 * important.  During driver activity we should enable
3004                 * SmartSpeed, so performance is maintained.
3005                 */
3006                if (phy->smart_speed == e1000_smart_speed_on) {
3007                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3008                                           &data);
3009                        if (ret_val)
3010                                return ret_val;
3011
3012                        data |= IGP01E1000_PSCFR_SMART_SPEED;
3013                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3014                                           data);
3015                        if (ret_val)
3016                                return ret_val;
3017                } else if (phy->smart_speed == e1000_smart_speed_off) {
3018                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3019                                           &data);
3020                        if (ret_val)
3021                                return ret_val;
3022
3023                        data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3024                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3025                                           data);
3026                        if (ret_val)
3027                                return ret_val;
3028                }
3029        }
3030
3031        return 0;
3032}
3033
3034/**
3035 *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3036 *  @hw: pointer to the HW structure
3037 *  @active: true to enable LPLU, false to disable
3038 *
3039 *  Sets the LPLU D3 state according to the active flag.  When
3040 *  activating LPLU this function also disables smart speed
3041 *  and vice versa.  LPLU will not be activated unless the
3042 *  device's autonegotiation advertisement includes 10, 10/100,
3043 *  or 10/100/1000 Mb/s at all duplexes.
3044 *  This is a function pointer entry point only called by
3045 *  PHY setup routines.
3046 **/
3047static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3048{
3049        struct e1000_phy_info *phy = &hw->phy;
3050        u32 phy_ctrl;
3051        s32 ret_val = 0;
3052        u16 data;
3053
3054        phy_ctrl = er32(PHY_CTRL);
3055
3056        if (!active) {
3057                phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3058                ew32(PHY_CTRL, phy_ctrl);
3059
3060                if (phy->type != e1000_phy_igp_3)
3061                        return 0;
3062
3063                /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3064                 * during Dx states where the power conservation is most
3065                 * important.  During driver activity we should enable
3066                 * SmartSpeed, so performance is maintained.
3067                 */
3068                if (phy->smart_speed == e1000_smart_speed_on) {
3069                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3070                                           &data);
3071                        if (ret_val)
3072                                return ret_val;
3073
3074                        data |= IGP01E1000_PSCFR_SMART_SPEED;
3075                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3076                                           data);
3077                        if (ret_val)
3078                                return ret_val;
3079                } else if (phy->smart_speed == e1000_smart_speed_off) {
3080                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3081                                           &data);
3082                        if (ret_val)
3083                                return ret_val;
3084
3085                        data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3086                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3087                                           data);
3088                        if (ret_val)
3089                                return ret_val;
3090                }
3091        } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3092                   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3093                   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3094                phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3095                ew32(PHY_CTRL, phy_ctrl);
3096
3097                if (phy->type != e1000_phy_igp_3)
3098                        return 0;
3099
3100                /* Call gig speed drop workaround on LPLU before accessing
3101                 * any PHY registers
3102                 */
3103                if (hw->mac.type == e1000_ich8lan)
3104                        e1000e_gig_downshift_workaround_ich8lan(hw);
3105
3106                /* When LPLU is enabled, we should disable SmartSpeed */
3107                ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
3108                if (ret_val)
3109                        return ret_val;
3110
3111                data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3112                ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
3113        }
3114
3115        return ret_val;
3116}
3117
3118/**
3119 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3120 *  @hw: pointer to the HW structure
3121 *  @bank:  pointer to the variable that returns the active bank
3122 *
3123 *  Reads signature byte from the NVM using the flash access registers.
3124 *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3125 **/
3126static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3127{
3128        u32 eecd;
3129        struct e1000_nvm_info *nvm = &hw->nvm;
3130        u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3131        u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3132        u32 nvm_dword = 0;
3133        u8 sig_byte = 0;
3134        s32 ret_val;
3135
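            /* Addressing note: the ICH/default path below reads single
             * bytes, so act_offset is the byte offset of the signature
             * word's high byte (word offset * 2 + 1).  The SPT/CNP path
             * reads dwords and e1000_read_flash_dword_ich8lan() converts
             * words to bytes itself, so it uses plain word offsets and
             * extracts the signature byte from the returned dword.
             */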
3136        switch (hw->mac.type) {
3137        case e1000_pch_spt:
3138        case e1000_pch_cnp:
3139                bank1_offset = nvm->flash_bank_size;
3140                act_offset = E1000_ICH_NVM_SIG_WORD;
3141
3142                /* set bank to 0 in case flash read fails */
3143                *bank = 0;
3144
3145                /* Check bank 0 */
3146                ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3147                                                         &nvm_dword);
3148                if (ret_val)
3149                        return ret_val;
3150                sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3151                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3152                    E1000_ICH_NVM_SIG_VALUE) {
3153                        *bank = 0;
3154                        return 0;
3155                }
3156
3157                /* Check bank 1 */
3158                ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3159                                                         bank1_offset,
3160                                                         &nvm_dword);
3161                if (ret_val)
3162                        return ret_val;
3163                sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3164                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3165                    E1000_ICH_NVM_SIG_VALUE) {
3166                        *bank = 1;
3167                        return 0;
3168                }
3169
3170                e_dbg("ERROR: No valid NVM bank present\n");
3171                return -E1000_ERR_NVM;
3172        case e1000_ich8lan:
3173        case e1000_ich9lan:
3174                eecd = er32(EECD);
3175                if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3176                    E1000_EECD_SEC1VAL_VALID_MASK) {
3177                        if (eecd & E1000_EECD_SEC1VAL)
3178                                *bank = 1;
3179                        else
3180                                *bank = 0;
3181
3182                        return 0;
3183                }
3184                e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3185                /* fall-thru */
3186        default:
3187                /* set bank to 0 in case flash read fails */
3188                *bank = 0;
3189
3190                /* Check bank 0 */
3191                ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3192                                                        &sig_byte);
3193                if (ret_val)
3194                        return ret_val;
3195                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3196                    E1000_ICH_NVM_SIG_VALUE) {
3197                        *bank = 0;
3198                        return 0;
3199                }
3200
3201                /* Check bank 1 */
3202                ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3203                                                        bank1_offset,
3204                                                        &sig_byte);
3205                if (ret_val)
3206                        return ret_val;
3207                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3208                    E1000_ICH_NVM_SIG_VALUE) {
3209                        *bank = 1;
3210                        return 0;
3211                }
3212
3213                e_dbg("ERROR: No valid NVM bank present\n");
3214                return -E1000_ERR_NVM;
3215        }
3216}
3217
3218/**
3219 *  e1000_read_nvm_spt - NVM access for SPT
3220 *  @hw: pointer to the HW structure
3221 *  @offset: The offset (in words) of the word(s) to read.
3222 *  @words: Size of data to read in words.
3223 *  @data: pointer to the word(s) to read at offset.
3224 *
3225 *  Reads word(s) from the NVM
3226 **/
3227static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3228                              u16 *data)
3229{
3230        struct e1000_nvm_info *nvm = &hw->nvm;
3231        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3232        u32 act_offset;
3233        s32 ret_val = 0;
3234        u32 bank = 0;
3235        u32 dword = 0;
3236        u16 offset_to_read;
3237        u16 i;
3238
3239        if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3240            (words == 0)) {
3241                e_dbg("nvm parameter(s) out of bounds\n");
3242                ret_val = -E1000_ERR_NVM;
3243                goto out;
3244        }
3245
3246        nvm->ops.acquire(hw);
3247
3248        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3249        if (ret_val) {
3250                e_dbg("Could not detect valid bank, assuming bank 0\n");
3251                bank = 0;
3252        }
3253
3254        act_offset = (bank) ? nvm->flash_bank_size : 0;
3255        act_offset += offset;
3256
3257        ret_val = 0;
3258
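            /* SPT flash access is 32-bit only, so read two words per
             * dword.  A single trailing word picks the correct half of
             * the dword based on (act_offset + i) parity, and any word
             * marked modified in the shadow RAM overrides the flash data.
             */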
3259        for (i = 0; i < words; i += 2) {
3260                if (words - i == 1) {
3261                        if (dev_spec->shadow_ram[offset + i].modified) {
3262                                data[i] =
3263                                    dev_spec->shadow_ram[offset + i].value;
3264                        } else {
3265                                offset_to_read = act_offset + i -
3266                                    ((act_offset + i) % 2);
3267                                ret_val =
3268                                  e1000_read_flash_dword_ich8lan(hw,
3269                                                                 offset_to_read,
3270                                                                 &dword);
3271                                if (ret_val)
3272                                        break;
3273                                if ((act_offset + i) % 2 == 0)
3274                                        data[i] = (u16)(dword & 0xFFFF);
3275                                else
3276                                        data[i] = (u16)((dword >> 16) & 0xFFFF);
3277                        }
3278                } else {
3279                        offset_to_read = act_offset + i;
3280                        if (!(dev_spec->shadow_ram[offset + i].modified) ||
3281                            !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3282                                ret_val =
3283                                  e1000_read_flash_dword_ich8lan(hw,
3284                                                                 offset_to_read,
3285                                                                 &dword);
3286                                if (ret_val)
3287                                        break;
3288                        }
3289                        if (dev_spec->shadow_ram[offset + i].modified)
3290                                data[i] =
3291                                    dev_spec->shadow_ram[offset + i].value;
3292                        else
3293                                data[i] = (u16)(dword & 0xFFFF);
3294                        if (dev_spec->shadow_ram[offset + i].modified)
3295                                data[i + 1] =
3296                                    dev_spec->shadow_ram[offset + i + 1].value;
3297                        else
3298                                data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
3299                }
3300        }
3301
3302        nvm->ops.release(hw);
3303
3304out:
3305        if (ret_val)
3306                e_dbg("NVM read error: %d\n", ret_val);
3307
3308        return ret_val;
3309}
3310
3311/**
3312 *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3313 *  @hw: pointer to the HW structure
3314 *  @offset: The offset (in words) of the word(s) to read.
3315 *  @words: Size of data to read in words
3316 *  @data: Pointer to the word(s) to read at offset.
3317 *
3318 *  Reads word(s) from the NVM using the flash access registers.
3319 **/
3320static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3321                                  u16 *data)
3322{
3323        struct e1000_nvm_info *nvm = &hw->nvm;
3324        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3325        u32 act_offset;
3326        s32 ret_val = 0;
3327        u32 bank = 0;
3328        u16 i, word;
3329
3330        if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3331            (words == 0)) {
3332                e_dbg("nvm parameter(s) out of bounds\n");
3333                ret_val = -E1000_ERR_NVM;
3334                goto out;
3335        }
3336
3337        nvm->ops.acquire(hw);
3338
3339        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3340        if (ret_val) {
3341                e_dbg("Could not detect valid bank, assuming bank 0\n");
3342                bank = 0;
3343        }
3344
3345        act_offset = (bank) ? nvm->flash_bank_size : 0;
3346        act_offset += offset;
3347
3348        ret_val = 0;
3349        for (i = 0; i < words; i++) {
3350                if (dev_spec->shadow_ram[offset + i].modified) {
3351                        data[i] = dev_spec->shadow_ram[offset + i].value;
3352                } else {
3353                        ret_val = e1000_read_flash_word_ich8lan(hw,
3354                                                                act_offset + i,
3355                                                                &word);
3356                        if (ret_val)
3357                                break;
3358                        data[i] = word;
3359                }
3360        }
3361
3362        nvm->ops.release(hw);
3363
3364out:
3365        if (ret_val)
3366                e_dbg("NVM read error: %d\n", ret_val);
3367
3368        return ret_val;
3369}
3370
3371/**
3372 *  e1000_flash_cycle_init_ich8lan - Initialize flash
3373 *  @hw: pointer to the HW structure
3374 *
3375 *  This function does initial flash setup so that a new read/write/erase cycle
3376 *  can be started.
3377 **/
3378static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3379{
3380        union ich8_hws_flash_status hsfsts;
3381        s32 ret_val = -E1000_ERR_NVM;
3382
3383        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3384
3385        /* Check if the flash descriptor is valid */
3386        if (!hsfsts.hsf_status.fldesvalid) {
3387                e_dbg("Flash descriptor invalid.  SW Sequencing must be used.\n");
3388                return -E1000_ERR_NVM;
3389        }
3390
3391        /* Clear FCERR and DAEL in hw status by writing 1 */
3392        hsfsts.hsf_status.flcerr = 1;
3393        hsfsts.hsf_status.dael = 1;
3394        if (hw->mac.type >= e1000_pch_spt)
3395                ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3396        else
3397                ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3398
3399        /* Either there should be a hardware SPI cycle-in-progress
3400         * bit to check against in order to start a new cycle, or
3401         * the FDONE bit should be set to 1 by hardware after reset
3402         * so that it can then be used as an indication of whether
3403         * a cycle is currently in progress or has already been
3404         * completed.
3405         */
3406
3407        if (!hsfsts.hsf_status.flcinprog) {
3408                /* There is no cycle running at present,
3409                 * so we can start a cycle.
3410                 * Begin by setting Flash Cycle Done.
3411                 */
3412                hsfsts.hsf_status.flcdone = 1;
3413                if (hw->mac.type >= e1000_pch_spt)
3414                        ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3415                else
3416                        ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3417                ret_val = 0;
3418        } else {
3419                s32 i;
3420
3421                /* Otherwise poll for some time so the current
3422                 * cycle has a chance to end before giving up.
3423                 */
3424                for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3425                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3426                        if (!hsfsts.hsf_status.flcinprog) {
3427                                ret_val = 0;
3428                                break;
3429                        }
3430                        udelay(1);
3431                }
3432                if (!ret_val) {
3433                        /* The previous cycle completed within the timeout,
3434                         * so now set the Flash Cycle Done.
3435                         */
3436                        hsfsts.hsf_status.flcdone = 1;
3437                        if (hw->mac.type >= e1000_pch_spt)
3438                                ew32flash(ICH_FLASH_HSFSTS,
3439                                          hsfsts.regval & 0xFFFF);
3440                        else
3441                                ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3442                } else {
3443                        e_dbg("Flash controller busy, cannot get access\n");
3444                }
3445        }
3446
3447        return ret_val;
3448}
3449
3450/**
3451 *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3452 *  @hw: pointer to the HW structure
3453 *  @timeout: maximum time to wait for completion
3454 *
3455 *  This function starts a flash cycle and waits for its completion.
3456 **/
3457static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3458{
3459        union ich8_hws_flash_ctrl hsflctl;
3460        union ich8_hws_flash_status hsfsts;
3461        u32 i = 0;
3462
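            /* On PCH_SPT and newer the flash registers are mapped in LAN
             * memory space and HSFCTL occupies the upper 16 bits of the
             * 32-bit HSFSTS register, hence the 32-bit accesses and
             * 16-bit shifts below.
             */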
3463        /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3464        if (hw->mac.type >= e1000_pch_spt)
3465                hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
3466        else
3467                hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3468        hsflctl.hsf_ctrl.flcgo = 1;
3469
3470        if (hw->mac.type >= e1000_pch_spt)
3471                ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
3472        else
3473                ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3474
3475        /* wait till FDONE bit is set to 1 */
3476        do {
3477                hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3478                if (hsfsts.hsf_status.flcdone)
3479                        break;
3480                udelay(1);
3481        } while (i++ < timeout);
3482
3483        if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3484                return 0;
3485
3486        return -E1000_ERR_NVM;
3487}
3488
3489/**
3490 *  e1000_read_flash_dword_ich8lan - Read dword from flash
3491 *  @hw: pointer to the HW structure
3492 *  @offset: offset to data location
3493 *  @data: pointer to the location for storing the data
3494 *
3495 *  Reads the flash dword at offset into data.  Offset is converted
3496 *  to bytes before read.
3497 **/
3498static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3499                                          u32 *data)
3500{
3501        /* Must convert word offset into bytes. */
3502        offset <<= 1;
3503        return e1000_read_flash_data32_ich8lan(hw, offset, data);
3504}
3505
3506/**
3507 *  e1000_read_flash_word_ich8lan - Read word from flash
3508 *  @hw: pointer to the HW structure
3509 *  @offset: offset to data location
3510 *  @data: pointer to the location for storing the data
3511 *
3512 *  Reads the flash word at offset into data.  Offset is converted
3513 *  to bytes before read.
3514 **/
3515static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3516                                         u16 *data)
3517{
3518        /* Must convert offset into bytes. */
3519        offset <<= 1;
3520
3521        return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3522}
3523
3524/**
3525 *  e1000_read_flash_byte_ich8lan - Read byte from flash
3526 *  @hw: pointer to the HW structure
3527 *  @offset: The offset of the byte to read.
3528 *  @data: Pointer to a byte to store the value read.
3529 *
3530 *  Reads a single byte from the NVM using the flash access registers.
3531 **/
3532static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3533                                         u8 *data)
3534{
3535        s32 ret_val;
3536        u16 word = 0;
3537
3538        /* In SPT, only 32-bit access is supported,
3539         * so this function should not be called.
3540         */
3541        if (hw->mac.type >= e1000_pch_spt)
3542                return -E1000_ERR_NVM;
3543        else
3544                ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3545
3546        if (ret_val)
3547                return ret_val;
3548
3549        *data = (u8)word;
3550
3551        return 0;
3552}
3553
3554/**
3555 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3556 *  @hw: pointer to the HW structure
3557 *  @offset: The offset (in bytes) of the byte or word to read.
3558 *  @size: Size of data to read, 1=byte 2=word
3559 *  @data: Pointer to the word to store the value read.
3560 *
3561 *  Reads a byte or word from the NVM using the flash access registers.
3562 **/
3563static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3564                                         u8 size, u16 *data)
3565{
3566        union ich8_hws_flash_status hsfsts;
3567        union ich8_hws_flash_ctrl hsflctl;
3568        u32 flash_linear_addr;
3569        u32 flash_data = 0;
3570        s32 ret_val = -E1000_ERR_NVM;
3571        u8 count = 0;
3572
3573        if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3574                return -E1000_ERR_NVM;
3575
3576        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3577                             hw->nvm.flash_base_addr);
3578
3579        do {
3580                udelay(1);
3581                /* Steps */
3582                ret_val = e1000_flash_cycle_init_ich8lan(hw);
3583                if (ret_val)
3584                        break;
3585
3586                hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3587                /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3588                hsflctl.hsf_ctrl.fldbcount = size - 1;
3589                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3590                ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3591
3592                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3593
3594                ret_val =
3595                    e1000_flash_cycle_ich8lan(hw,
3596                                              ICH_FLASH_READ_COMMAND_TIMEOUT);
3597
3598                /* If FCERR is set, clear it and retry the whole
3599                 * sequence a few more times; otherwise read the
3600                 * data out of the Flash Data0 register, least
3601                 * significant byte first.
3602                 */
3603                if (!ret_val) {
3604                        flash_data = er32flash(ICH_FLASH_FDATA0);
3605                        if (size == 1)
3606                                *data = (u8)(flash_data & 0x000000FF);
3607                        else if (size == 2)
3608                                *data = (u16)(flash_data & 0x0000FFFF);
3609                        break;
3610                } else {
3611                        /* If we've gotten here, then things are probably
3612                         * completely hosed, but if the error condition is
3613                         * detected, it won't hurt to give it another try...
3614                         * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3615                         */
3616                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3617                        if (hsfsts.hsf_status.flcerr) {
3618                                /* Repeat for some time before giving up. */
3619                                continue;
3620                        } else if (!hsfsts.hsf_status.flcdone) {
3621                                e_dbg("Timeout error - flash cycle did not complete.\n");
3622                                break;
3623                        }
3624                }
3625        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3626
3627        return ret_val;
3628}
3629
3630/**
3631 *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3632 *  @hw: pointer to the HW structure
3633 *  @offset: The offset (in bytes) of the dword to read.
3634 *  @data: Pointer to the dword to store the value read.
3635 *
3636 *  Reads a dword from the NVM using the flash access registers.
3637 **/
3638
3639static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3640                                           u32 *data)
3641{
3642        union ich8_hws_flash_status hsfsts;
3643        union ich8_hws_flash_ctrl hsflctl;
3644        u32 flash_linear_addr;
3645        s32 ret_val = -E1000_ERR_NVM;
3646        u8 count = 0;
3647
3648        if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt)
3649                return -E1000_ERR_NVM;
3650        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3651                             hw->nvm.flash_base_addr);
3652
3653        do {
3654                udelay(1);
3655                /* Steps */
3656                ret_val = e1000_flash_cycle_init_ich8lan(hw);
3657                if (ret_val)
3658                        break;
3659                /* In SPT, this register is in LAN memory space, not flash.
3660                 * Therefore, only 32-bit access is supported
3661                 */
3662                hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
3663
3664                /* A dbcount of sizeof(u32) - 1 (0b11) selects a 4 byte (dword) transfer. */
3665                hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3666                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3667                /* In SPT, this register is in LAN memory space, not flash.
3668                 * Therefore, only 32-bit access is supported
3669                 */
3670                ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
3671                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3672
3673                ret_val =
3674                   e1000_flash_cycle_ich8lan(hw,
3675                                             ICH_FLASH_READ_COMMAND_TIMEOUT);
3676
3677                /* If FCERR is set, clear it and retry the whole
3678                 * sequence a few more times; otherwise read the
3679                 * dword out of the Flash Data0 register, least
3680                 * significant byte first.
3681                 */
3682                if (!ret_val) {
3683                        *data = er32flash(ICH_FLASH_FDATA0);
3684                        break;
3685                } else {
3686                        /* If we've gotten here, then things are probably
3687                         * completely hosed, but if the error condition is
3688                         * detected, it won't hurt to give it another try...
3689                         * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3690                         */
3691                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3692                        if (hsfsts.hsf_status.flcerr) {
3693                                /* Repeat for some time before giving up. */
3694                                continue;
3695                        } else if (!hsfsts.hsf_status.flcdone) {
3696                                e_dbg("Timeout error - flash cycle did not complete.\n");
3697                                break;
3698                        }
3699                }
3700        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3701
3702        return ret_val;
3703}
3704
3705/**
3706 *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3707 *  @hw: pointer to the HW structure
3708 *  @offset: The offset (in words) of the word(s) to write.
3709 *  @words: Size of data to write in words
3710 *  @data: Pointer to the word(s) to write at offset.
3711 *
3712 *  Writes word(s) to the NVM shadow RAM (committed to flash by the checksum update).
3713 **/
3714static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3715                                   u16 *data)
3716{
3717        struct e1000_nvm_info *nvm = &hw->nvm;
3718        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3719        u16 i;
3720
3721        if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3722            (words == 0)) {
3723                e_dbg("nvm parameter(s) out of bounds\n");
3724                return -E1000_ERR_NVM;
3725        }
3726
3727        nvm->ops.acquire(hw);
3728
3729        for (i = 0; i < words; i++) {
3730                dev_spec->shadow_ram[offset + i].modified = true;
3731                dev_spec->shadow_ram[offset + i].value = data[i];
3732        }
3733
3734        nvm->ops.release(hw);
3735
3736        return 0;
3737}
3738
3739/**
3740 *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
3741 *  @hw: pointer to the HW structure
3742 *
3743 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3744 *  which writes the checksum to the shadow ram.  The changes in the shadow
3745 *  ram are then committed to the EEPROM by processing each bank at a time
3746 *  checking for the modified bit and writing only the pending changes.
3747 *  After a successful commit, the shadow ram is cleared and is ready for
3748 *  future writes.
3749 **/
3750static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
3751{
3752        struct e1000_nvm_info *nvm = &hw->nvm;
3753        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3754        u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3755        s32 ret_val;
3756        u32 dword = 0;
3757
3758        ret_val = e1000e_update_nvm_checksum_generic(hw);
3759        if (ret_val)
3760                goto out;
3761
3762        if (nvm->type != e1000_nvm_flash_sw)
3763                goto out;
3764
3765        nvm->ops.acquire(hw);
3766
3767        /* We're writing to the opposite bank so if we're on bank 1,
3768         * write to bank 0 etc.  We also need to erase the segment that
3769         * is going to be written
3770         */
3771        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3772        if (ret_val) {
3773                e_dbg("Could not detect valid bank, assuming bank 0\n");
3774                bank = 0;
3775        }
3776
3777        if (bank == 0) {
3778                new_bank_offset = nvm->flash_bank_size;
3779                old_bank_offset = 0;
3780                ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3781                if (ret_val)
3782                        goto release;
3783        } else {
3784                old_bank_offset = nvm->flash_bank_size;
3785                new_bank_offset = 0;
3786                ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3787                if (ret_val)
3788                        goto release;
3789        }
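            /* Commit the shadow RAM two words at a time: SPT flash access
             * is 32-bit only, so each dword is rebuilt from the old bank
             * and any modified shadow RAM words before being written to
             * the new bank.
             */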
3790        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) {
3791                /* Determine whether to write the value stored
3792                 * in the other NVM bank or a modified value stored
3793                 * in the shadow RAM
3794                 */
3795                ret_val = e1000_read_flash_dword_ich8lan(hw,
3796                                                         i + old_bank_offset,
3797                                                         &dword);
3798
3799                if (dev_spec->shadow_ram[i].modified) {
3800                        dword &= 0xffff0000;
3801                        dword |= (dev_spec->shadow_ram[i].value & 0xffff);
3802                }
3803                if (dev_spec->shadow_ram[i + 1].modified) {
3804                        dword &= 0x0000ffff;
3805                        dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
3806                                  << 16);
3807                }
3808                if (ret_val)
3809                        break;
3810
3811                /* If the word is 0x13, then make sure the signature bits
3812                 * (15:14) are 11b until the commit has completed.
3813                 * This will allow us to write 10b which indicates the
3814                 * signature is valid.  We want to do this after the write
3815                 * has completed so that we don't mark the segment valid
3816                 * while the write is still in progress
3817                 */
3818                if (i == E1000_ICH_NVM_SIG_WORD - 1)
3819                        dword |= E1000_ICH_NVM_SIG_MASK << 16;
3820
3821                /* Convert offset to bytes. */
3822                act_offset = (i + new_bank_offset) << 1;
3823
3824                usleep_range(100, 200);
3825
3826                /* Write the data to the new bank. Offset in words */
3827                act_offset = i + new_bank_offset;
3828                ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
3829                                                                dword);
3830                if (ret_val)
3831                        break;
3832        }
3833
3834        /* Don't bother writing the segment valid bits if sector
3835         * programming failed.
3836         */
3837        if (ret_val) {
3838                /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
3839                e_dbg("Flash commit failed.\n");
3840                goto release;
3841        }
3842
3843        /* Finally validate the new segment by setting bits 15:14
3844         * to 10b in word 0x13; this can be done without an
3845         * erase as well since these bits are 11 to start with
3846         * and we need to change bit 14 to 0b
3847         */
3848        act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3849
3850        /* offset in words but we read dword */
3851        --act_offset;
3852        ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3853
3854        if (ret_val)
3855                goto release;
3856
3857        dword &= 0xBFFFFFFF;
3858        ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3859
3860        if (ret_val)
3861                goto release;
3862
3863        /* And invalidate the previously valid segment by setting
3864         * its signature word (0x13) high_byte to 0b. This can be
3865         * done without an erase because flash erase sets all bits
3866         * to 1's. We can write 1's to 0's without an erase
3867         */
3868        act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3869
3870        /* offset in words but we read dword */
3871        act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
3872        ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3873
3874        if (ret_val)
3875                goto release;
3876
3877        dword &= 0x00FFFFFF;
3878        ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3879
3880        if (ret_val)
3881                goto release;
3882
3883        /* Great!  Everything worked, we can now clear the cached entries. */
3884        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
3885                dev_spec->shadow_ram[i].modified = false;
3886                dev_spec->shadow_ram[i].value = 0xFFFF;
3887        }
3888
3889release:
3890        nvm->ops.release(hw);
3891
3892        /* Reload the EEPROM, or else modifications will not appear
3893         * until after the next adapter reset.
3894         */
3895        if (!ret_val) {
3896                nvm->ops.reload(hw);
3897                usleep_range(10000, 20000);
3898        }
3899
3900out:
3901        if (ret_val)
3902                e_dbg("NVM update error: %d\n", ret_val);
3903
3904        return ret_val;
3905}
3906
3907/**
3908 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3909 *  @hw: pointer to the HW structure
3910 *
3911 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3912 *  which writes the checksum to the shadow ram.  The changes in the shadow
3913 *  ram are then committed to the EEPROM by processing each bank at a time
3914 *  checking for the modified bit and writing only the pending changes.
3915 *  After a successful commit, the shadow ram is cleared and is ready for
3916 *  future writes.
3917 **/
3918static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3919{
3920        struct e1000_nvm_info *nvm = &hw->nvm;
3921        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3922        u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3923        s32 ret_val;
3924        u16 data = 0;
3925
3926        ret_val = e1000e_update_nvm_checksum_generic(hw);
3927        if (ret_val)
3928                goto out;
3929
3930        if (nvm->type != e1000_nvm_flash_sw)
3931                goto out;
3932
3933        nvm->ops.acquire(hw);
3934
3935        /* We're writing to the opposite bank so if we're on bank 1,
3936         * write to bank 0 etc.  We also need to erase the segment that
3937         * is going to be written
3938         */
3939        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3940        if (ret_val) {
3941                e_dbg("Could not detect valid bank, assuming bank 0\n");
3942                bank = 0;
3943        }
3944
3945        if (bank == 0) {
3946                new_bank_offset = nvm->flash_bank_size;
3947                old_bank_offset = 0;
3948                ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3949                if (ret_val)
3950                        goto release;
3951        } else {
3952                old_bank_offset = nvm->flash_bank_size;
3953                new_bank_offset = 0;
3954                ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3955                if (ret_val)
3956                        goto release;
3957        }
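            /* Commit one word at a time: use the modified shadow RAM
             * value if present, otherwise copy the word from the old
             * bank, then write it to the new bank as two byte-wide
             * flash cycles.
             */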
3958        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
3959                if (dev_spec->shadow_ram[i].modified) {
3960                        data = dev_spec->shadow_ram[i].value;
3961                } else {
3962                        ret_val = e1000_read_flash_word_ich8lan(hw, i +
3963                                                                old_bank_offset,
3964                                                                &data);
3965                        if (ret_val)
3966                                break;
3967                }
3968
3969                /* If the word is 0x13, then make sure the signature bits
3970                 * (15:14) are 11b until the commit has completed.
3971                 * This will allow us to write 10b which indicates the
3972                 * signature is valid.  We want to do this after the write
3973                 * has completed so that we don't mark the segment valid
3974                 * while the write is still in progress
3975                 */
3976                if (i == E1000_ICH_NVM_SIG_WORD)
3977                        data |= E1000_ICH_NVM_SIG_MASK;
3978
3979                /* Convert offset to bytes. */
3980                act_offset = (i + new_bank_offset) << 1;
3981
3982                usleep_range(100, 200);
3983                /* Write the bytes to the new bank. */
3984                ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3985                                                               act_offset,
3986                                                               (u8)data);
3987                if (ret_val)
3988                        break;
3989
3990                usleep_range(100, 200);
3991                ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3992                                                               act_offset + 1,
3993                                                               (u8)(data >> 8));
3994                if (ret_val)
3995                        break;
3996        }
3997
3998        /* Don't bother writing the segment valid bits if sector
3999         * programming failed.
4000         */
4001        if (ret_val) {
4002                /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
4003                e_dbg("Flash commit failed.\n");
4004                goto release;
4005        }
4006
4007        /* Finally validate the new segment by setting bits 15:14
4008         * to 10b in word 0x13; this can be done without an
4009         * erase as well since these bits are 11 to start with
4010         * and we need to change bit 14 to 0b
4011         */
4012        act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4013        ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4014        if (ret_val)
4015                goto release;
4016
4017        data &= 0xBFFF;
4018        ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4019                                                       act_offset * 2 + 1,
4020                                                       (u8)(data >> 8));
4021        if (ret_val)
4022                goto release;
4023
4024        /* And invalidate the previously valid segment by setting
4025         * its signature word (0x13) high_byte to 0b. This can be
4026         * done without an erase because flash erase sets all bits
4027         * to 1's. We can write 1's to 0's without an erase
4028         */
4029        act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4030        ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4031        if (ret_val)
4032                goto release;
4033
4034        /* Great!  Everything worked, we can now clear the cached entries. */
4035        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
4036                dev_spec->shadow_ram[i].modified = false;
4037                dev_spec->shadow_ram[i].value = 0xFFFF;
4038        }
4039
4040release:
4041        nvm->ops.release(hw);
4042
4043        /* Reload the EEPROM, or else modifications will not appear
4044         * until after the next adapter reset.
4045         */
4046        if (!ret_val) {
4047                nvm->ops.reload(hw);
4048                usleep_range(10000, 20000);
4049        }
4050
4051out:
4052        if (ret_val)
4053                e_dbg("NVM update error: %d\n", ret_val);
4054
4055        return ret_val;
4056}
4057
4058/**
4059 *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4060 *  @hw: pointer to the HW structure
4061 *
4062 *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4063 *  If the bit is 0, the EEPROM has been modified, but the checksum was not
4064 *  calculated, in which case we need to calculate the checksum and set bit 6.
4065 **/
4066static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4067{
4068        s32 ret_val;
4069        u16 data;
4070        u16 word;
4071        u16 valid_csum_mask;
4072
4073        /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4074         * the checksum needs to be fixed.  This bit is an indication that
4075         * the NVM was prepared by OEM software and did not calculate
4076         * the checksum...a likely scenario.
4077         */
4078        switch (hw->mac.type) {
4079        case e1000_pch_lpt:
4080        case e1000_pch_spt:
4081        case e1000_pch_cnp:
4082                word = NVM_COMPAT;
4083                valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4084                break;
4085        default:
4086                word = NVM_FUTURE_INIT_WORD1;
4087                valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4088                break;
4089        }
4090
4091        ret_val = e1000_read_nvm(hw, word, 1, &data);
4092        if (ret_val)
4093                return ret_val;
4094
4095        if (!(data & valid_csum_mask)) {
4096                data |= valid_csum_mask;
4097                ret_val = e1000_write_nvm(hw, word, 1, &data);
4098                if (ret_val)
4099                        return ret_val;
4100                ret_val = e1000e_update_nvm_checksum(hw);
4101                if (ret_val)
4102                        return ret_val;
4103        }
4104
4105        return e1000e_validate_nvm_checksum_generic(hw);
4106}
4107
4108/**
4109 *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
4110 *  @hw: pointer to the HW structure
4111 *
4112 *  To prevent malicious write/erase of the NVM, set it to be read-only
4113 *  so that the hardware ignores all write/erase cycles of the NVM via
4114 *  the flash control registers.  The shadow-ram copy of the NVM will
4115 *  still be updated, however any updates to this copy will not stick
4116 *  across driver reloads.
4117 **/
4118void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
4119{
4120        struct e1000_nvm_info *nvm = &hw->nvm;
4121        union ich8_flash_protected_range pr0;
4122        union ich8_hws_flash_status hsfsts;
4123        u32 gfpreg;
4124
4125        nvm->ops.acquire(hw);
4126
4127        gfpreg = er32flash(ICH_FLASH_GFPREG);
4128
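            /* GFPREG describes the GbE flash region: the base is in the
             * low bits and the limit in bits 31:16, both masked with
             * FLASH_GFPREG_BASE_MASK when programming PR0 below.
             */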
4129        /* Write-protect GbE Sector of NVM */
4130        pr0.regval = er32flash(ICH_FLASH_PR0);
4131        pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
4132        pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
4133        pr0.range.wpe = true;
4134        ew32flash(ICH_FLASH_PR0, pr0.regval);
4135
4136        /* Lock down a subset of GbE Flash Control Registers, e.g.
4137         * PR0 to prevent the write-protection from being lifted.
4138         * Once FLOCKDN is set, the registers protected by it cannot
4139         * be written until FLOCKDN is cleared by a hardware reset.
4140         */
4141        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4142        hsfsts.hsf_status.flockdn = true;
4143        ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
4144
4145        nvm->ops.release(hw);
4146}
4147
4148/**
4149 *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4150 *  @hw: pointer to the HW structure
4151 *  @offset: The offset (in bytes) of the byte/word to write.
4152 *  @size: Size of data to write, 1=byte 2=word
4153 *  @data: The byte(s) to write to the NVM.
4154 *
4155 *  Writes one/two bytes to the NVM using the flash access registers.
4156 **/
4157static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4158                                          u8 size, u16 data)
4159{
4160        union ich8_hws_flash_status hsfsts;
4161        union ich8_hws_flash_ctrl hsflctl;
4162        u32 flash_linear_addr;
4163        u32 flash_data = 0;
4164        s32 ret_val;
4165        u8 count = 0;
4166
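            /* PCH_SPT and newer only support dword (size 4) writes via
             * the 32-bit register interface; older parts accept byte or
             * word writes.
             */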
4167        if (hw->mac.type >= e1000_pch_spt) {
4168                if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4169                        return -E1000_ERR_NVM;
4170        } else {
4171                if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4172                        return -E1000_ERR_NVM;
4173        }
4174
4175        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4176                             hw->nvm.flash_base_addr);
4177
4178        do {
4179                udelay(1);
4180                /* Steps */
4181                ret_val = e1000_flash_cycle_init_ich8lan(hw);
4182                if (ret_val)
4183                        break;
4184                /* In SPT, this register is in LAN memory space, not
4185                 * flash.  Therefore, only 32-bit access is supported.
4186                 */
4187                if (hw->mac.type >= e1000_pch_spt)
4188                        hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
4189                else
4190                        hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4191
4192                /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4193                hsflctl.hsf_ctrl.fldbcount = size - 1;
4194                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4195                /* In SPT, this register is in LAN memory space,
4196                 * not flash.  Therefore, only 32-bit access is
4197                 * supported.
4198                 */
4199                if (hw->mac.type >= e1000_pch_spt)
4200                        ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4201                else
4202                        ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4203
4204                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4205
4206                if (size == 1)
4207                        flash_data = (u32)data & 0x00FF;
4208                else
4209                        flash_data = (u32)data;
4210
4211                ew32flash(ICH_FLASH_FDATA0, flash_data);
4212
4213                /* Check if FCERR is set to 1; if so, clear it and try
4214                 * the whole sequence a few more times, else we are done.
4215                 */
4216                ret_val =
4217                    e1000_flash_cycle_ich8lan(hw,
4218                                              ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4219                if (!ret_val)
4220                        break;
4221
4222                /* If we're here, then things are most likely
4223                 * completely hosed, but if the error condition
4224                 * is detected, it won't hurt to give it another
4225                 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4226                 */
4227                hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4228                if (hsfsts.hsf_status.flcerr)
4229                        /* Repeat for some time before giving up. */
4230                        continue;
4231                if (!hsfsts.hsf_status.flcdone) {
4232                        e_dbg("Timeout error - flash cycle did not complete.\n");
4233                        break;
4234                }
4235        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4236
4237        return ret_val;
4238}
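
/* A short sketch of the SPT-and-newer access pattern used above (inferred
 * from this function, stated here only as a reading aid): HSFSTS and HSFCTL
 * are mapped as a single 32-bit LAN-memory register at ICH_FLASH_HSFSTS,
 * status in the low word and control in the high word, so the control half
 * is read and written with a 16-bit shift:
 *
 *      u32 dword = er32flash(ICH_FLASH_HSFSTS);
 *      hsflctl.regval = (u16)(dword >> 16);       // control word
 *      hsflctl.hsf_ctrl.fldbcount = size - 1;     // transfer size minus one
 *      hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
 *      ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
 *
 * Pre-SPT parts keep the two registers separate and use the 16-bit
 * er16flash()/ew16flash() accessors instead.
 */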
4239
4240/**
4241 *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4242 *  @hw: pointer to the HW structure
4243 *  @offset: The offset (in bytes) of the dword to write.
4244 *  @data: The 4 bytes to write to the NVM.
4245 *
4246 *  Writes one dword (4 bytes) to the NVM using the flash access registers.
4247 **/
4248static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4249                                            u32 data)
4250{
4251        union ich8_hws_flash_status hsfsts;
4252        union ich8_hws_flash_ctrl hsflctl;
4253        u32 flash_linear_addr;
4254        s32 ret_val;
4255        u8 count = 0;
4256
4257        if (hw->mac.type >= e1000_pch_spt) {
4258                if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4259                        return -E1000_ERR_NVM;
4260        }
4261        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4262                             hw->nvm.flash_base_addr);
4263        do {
4264                udelay(1);
4265                /* Steps */
4266                ret_val = e1000_flash_cycle_init_ich8lan(hw);
4267                if (ret_val)
4268                        break;
4269
4270                /* In SPT, this register is in LAN memory space, not
4271                 * flash.  Therefore, only 32-bit access is supported.
4272                 */
4273                if (hw->mac.type >= e1000_pch_spt)
4274                        hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
4275                            >> 16;
4276                else
4277                        hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4278
4279                hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4280                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4281
4282                /* In SPT, this register is in LAN memory space,
4283                 * not flash.  Therefore, only 32-bit access is
4284                 * supported.
4285                 */
4286                if (hw->mac.type >= e1000_pch_spt)
4287                        ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4288                else
4289                        ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4290
4291                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4292
4293                ew32flash(ICH_FLASH_FDATA0, data);
4294
4295                /* Check if FCERR is set to 1; if so, clear it and try
4296                 * the whole sequence a few more times, else we are done.
4297                 */
4298                ret_val =
4299                   e1000_flash_cycle_ich8lan(hw,
4300                                             ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4301
4302                if (!ret_val)
4303                        break;
4304
4305                /* If we're here, then things are most likely
4306                 * completely hosed, but if the error condition
4307                 * is detected, it won't hurt to give it another
4308                 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4309                 */
4310                hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4311
4312                if (hsfsts.hsf_status.flcerr)
4313                        /* Repeat for some time before giving up. */
4314                        continue;
4315                if (!hsfsts.hsf_status.flcdone) {
4316                        e_dbg("Timeout error - flash cycle did not complete.\n");
4317                        break;
4318                }
4319        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4320
4321        return ret_val;
4322}
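
/* Worked example of the fldbcount encoding used by both write paths above
 * (a reading aid, derived from the code rather than a datasheet):
 *
 *      1-byte write  -> fldbcount = 0
 *      2-byte write  -> fldbcount = 1
 *      4-byte write  -> fldbcount = sizeof(u32) - 1 = 3
 *
 * which matches e1000_write_flash_data_ich8lan() rejecting any size other
 * than 4 on SPT and newer parts, where only the dword form is used.
 */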
4323
4324/**
4325 *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4326 *  @hw: pointer to the HW structure
4327 *  @offset: The index of the byte to write.
4328 *  @data: The byte to write to the NVM.
4329 *
4330 *  Writes a single byte to the NVM using the flash access registers.
4331 **/
4332static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4333                                          u8 data)
4334{
4335        u16 word = (u16)data;
4336
4337        return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4338}
4339
4340/**
4341 *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4342 *  @hw: pointer to the HW structure
4343 *  @offset: The offset of the word to write.
4344 *  @dword: The dword to write to the NVM.
4345 *
4346 *  Writes a single dword to the NVM using the flash access registers.
4347 *  Goes through a retry algorithm before giving up.
4348 **/
4349static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4350                                                 u32 offset, u32 dword)
4351{
4352        s32 ret_val;
4353        u16 program_retries;
4354
4355        /* Must convert word offset into bytes. */
4356        offset <<= 1;
4357        ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4358
4359        if (!ret_val)
4360                return ret_val;
4361        for (program_retries = 0; program_retries < 100; program_retries++) {
4362                e_dbg("Retrying Dword %8.8X at offset %u\n", dword, offset);
4363                usleep_range(100, 200);
4364                ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4365                if (!ret_val)
4366                        break;
4367        }
4368        if (program_retries == 100)
4369                return -E1000_ERR_NVM;
4370
4371        return 0;
4372}
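
/* Worked example (illustrative): a caller passing word offset 0x10 ends up
 * programming byte offset 0x20 (0x10 << 1), so the 16-bit word granularity
 * of the NVM interface is preserved even though each flash cycle issued by
 * e1000_write_flash_data32_ich8lan() moves a whole dword.
 */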
4373
4374/**
4375 *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4376 *  @hw: pointer to the HW structure
4377 *  @offset: The offset of the byte to write.
4378 *  @byte: The byte to write to the NVM.
4379 *
4380 *  Writes a single byte to the NVM using the flash access registers.
4381 *  Goes through a retry algorithm before giving up.
4382 **/
4383static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4384                                                u32 offset, u8 byte)
4385{
4386        s32 ret_val;
4387        u16 program_retries;
4388
4389        ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4390        if (!ret_val)
4391                return ret_val;
4392
4393        for (program_retries = 0; program_retries < 100; program_retries++) {
4394                e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
4395                usleep_range(100, 200);
4396                ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4397                if (!ret_val)
4398                        break;
4399        }
4400        if (program_retries == 100)
4401                return -E1000_ERR_NVM;
4402
4403        return 0;
4404}
4405
4406/**
4407 *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4408 *  @hw: pointer to the HW structure
4409 *  @bank: 0 for first bank, 1 for second bank, etc.
4410 *
4411 *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4412 *  bank N is 4096 * N + flash_reg_addr.
4413 **/
4414static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4415{
4416        struct e1000_nvm_info *nvm = &hw->nvm;
4417        union ich8_hws_flash_status hsfsts;
4418        union ich8_hws_flash_ctrl hsflctl;
4419        u32 flash_linear_addr;
4420        /* bank size is in 16bit words - adjust to bytes */
4421        u32 flash_bank_size = nvm->flash_bank_size * 2;
4422        s32 ret_val;
4423        s32 count = 0;
4424        s32 j, iteration, sector_size;
4425
4426        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4427
4428        /* Determine HW Sector size: Read BERASE bits of hw flash status
4429         * register
4430         * 00: The Hw sector is 256 bytes, hence we need to erase 16
4431         *     consecutive sectors.  The start index for the nth Hw sector
4432         *     can be calculated as = bank * 4096 + n * 256
4433         * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4434         *     The start index for the nth Hw sector can be calculated
4435         *     as = bank * 4096
4436         * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4437         *     (ich9 only, otherwise error condition)
4438         * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4439         */
4440        switch (hsfsts.hsf_status.berasesz) {
4441        case 0:
4442                /* Hw sector size 256 */
4443                sector_size = ICH_FLASH_SEG_SIZE_256;
4444                iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4445                break;
4446        case 1:
4447                sector_size = ICH_FLASH_SEG_SIZE_4K;
4448                iteration = 1;
4449                break;
4450        case 2:
4451                sector_size = ICH_FLASH_SEG_SIZE_8K;
4452                iteration = 1;
4453                break;
4454        case 3:
4455                sector_size = ICH_FLASH_SEG_SIZE_64K;
4456                iteration = 1;
4457                break;
4458        default:
4459                return -E1000_ERR_NVM;
4460        }
4461
4462        /* Start with the base address, then add the sector offset. */
4463        flash_linear_addr = hw->nvm.flash_base_addr;
4464        flash_linear_addr += (bank) ? flash_bank_size : 0;
4465
4466        for (j = 0; j < iteration; j++) {
4467                do {
4468                        u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4469
4470                        /* Steps */
4471                        ret_val = e1000_flash_cycle_init_ich8lan(hw);
4472                        if (ret_val)
4473                                return ret_val;
4474
4475                        /* Write a value 11 (block Erase) in Flash
4476                         * Cycle field in hw flash control
4477                         */
4478                        if (hw->mac.type >= e1000_pch_spt)
4479                                hsflctl.regval =
4480                                    er32flash(ICH_FLASH_HSFSTS) >> 16;
4481                        else
4482                                hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4483
4484                        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4485                        if (hw->mac.type >= e1000_pch_spt)
4486                                ew32flash(ICH_FLASH_HSFSTS,
4487                                          hsflctl.regval << 16);
4488                        else
4489                                ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4490
4491                        /* Write the last 24 bits of an index within the
4492                         * block into Flash Linear address field in Flash
4493                         * Address.
4494                         */
4495                        ew32flash(ICH_FLASH_FADDR, flash_linear_addr +
4496                                  (j * sector_size));
4497
4498                        ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4499                        if (!ret_val)
4500                                break;
4501
4502                        /* Check if FCERR is set to 1.  If so,
4503                         * clear it and try the whole sequence
4504                         * a few more times, else we are done.
4505                         */
4506                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4507                        if (hsfsts.hsf_status.flcerr)
4508                                /* repeat for some time before giving up */
4509                                continue;
4510                        else if (!hsfsts.hsf_status.flcdone)
4511                                return ret_val;
4512                } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4513        }
4514
4515        return 0;
4516}
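
/* Worked example (assuming a 4 KiB flash bank, i.e. nvm->flash_bank_size of
 * 2048 words): with BERASE = 00 the hardware sector is 256 bytes, so the
 * loop above issues 16 erase cycles per bank,
 *
 *      flash_bank_size = 2048 * 2 = 4096 bytes
 *      iteration = 4096 / ICH_FLASH_SEG_SIZE_256 = 16
 *      address of sector j in bank 1 = flash_base_addr + 4096 + j * 256
 *
 * while for BERASE = 01/10/11 a single erase cycle covers the whole bank
 * (iteration = 1).
 */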
4517
4518/**
4519 *  e1000_valid_led_default_ich8lan - Set the default LED settings
4520 *  @hw: pointer to the HW structure
4521 *  @data: Pointer to the LED settings
4522 *
4523 *  Reads the LED default settings from the NVM into data.  If the NVM LED
4524 *  setting is all 0's or F's, set the LED default to a valid LED default
4525 *  setting.
4526 **/
4527static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4528{
4529        s32 ret_val;
4530
4531        ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
4532        if (ret_val) {
4533                e_dbg("NVM Read Error\n");
4534                return ret_val;
4535        }
4536
4537        if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4538                *data = ID_LED_DEFAULT_ICH8LAN;
4539
4540        return 0;
4541}
4542
4543/**
4544 *  e1000_id_led_init_pchlan - store LED configurations
4545 *  @hw: pointer to the HW structure
4546 *
4547 *  PCH does not control LEDs via the LEDCTL register, rather it uses
4548 *  the PHY LED configuration register.
4549 *
4550 *  PCH also does not have an "always on" or "always off" mode which
4551 *  complicates the ID feature.  Instead of using the "on" mode to indicate
4552 *  in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
4553 *  use "link_up" mode.  The LEDs will still ID on request if there is no
4554 *  link based on logic in e1000_led_[on|off]_pchlan().
4555 **/
4556static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4557{
4558        struct e1000_mac_info *mac = &hw->mac;
4559        s32 ret_val;
4560        const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4561        const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4562        u16 data, i, temp, shift;
4563
4564        /* Get default ID LED modes */
4565        ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4566        if (ret_val)
4567                return ret_val;
4568
4569        mac->ledctl_default = er32(LEDCTL);
4570        mac->ledctl_mode1 = mac->ledctl_default;
4571        mac->ledctl_mode2 = mac->ledctl_default;
4572
4573        for (i = 0; i < 4; i++) {
4574                temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4575                shift = (i * 5);
4576                switch (temp) {
4577                case ID_LED_ON1_DEF2:
4578                case ID_LED_ON1_ON2:
4579                case ID_LED_ON1_OFF2:
4580                        mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4581                        mac->ledctl_mode1 |= (ledctl_on << shift);
4582                        break;
4583                case ID_LED_OFF1_DEF2:
4584                case ID_LED_OFF1_ON2:
4585                case ID_LED_OFF1_OFF2:
4586                        mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4587                        mac->ledctl_mode1 |= (ledctl_off << shift);
4588                        break;
4589                default:
4590                        /* Do nothing */
4591                        break;
4592                }
4593                switch (temp) {
4594                case ID_LED_DEF1_ON2:
4595                case ID_LED_ON1_ON2:
4596                case ID_LED_OFF1_ON2:
4597                        mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4598                        mac->ledctl_mode2 |= (ledctl_on << shift);
4599                        break;
4600                case ID_LED_DEF1_OFF2:
4601                case ID_LED_ON1_OFF2:
4602                case ID_LED_OFF1_OFF2:
4603                        mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4604                        mac->ledctl_mode2 |= (ledctl_off << shift);
4605                        break;
4606                default:
4607                        /* Do nothing */
4608                        break;
4609                }
4610        }
4611
4612        return 0;
4613}
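
/* Worked example of the bit packing handled above (illustrative only): the
 * NVM ID LED word stores one 4-bit mode per LED, while the PHY LED
 * configuration packs one 5-bit field per LED.  For LED 1 (i = 1):
 *
 *      temp  = (data >> 4) & E1000_LEDCTL_LED0_MODE_MASK;  // NVM bits 7:4
 *      shift = 5;                                           // PHY bits 9:5
 *      mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << 5);
 *      mac->ledctl_mode1 |= ledctl_on << 5;
 *
 * ledctl_on and ledctl_off both select the "link_up" mode; "off" differs
 * only by the invert bit (E1000_PHY_LED0_IVRT), which
 * e1000_led_on/off_pchlan() later toggles while there is no link.
 */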
4614
4615/**
4616 *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4617 *  @hw: pointer to the HW structure
4618 *
4619 *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4620 *  register, so the bus width is hardcoded.
4621 **/
4622static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4623{
4624        struct e1000_bus_info *bus = &hw->bus;
4625        s32 ret_val;
4626
4627        ret_val = e1000e_get_bus_info_pcie(hw);
4628
4629        /* ICH devices are "PCI Express"-ish.  They have
4630         * a configuration space, but do not contain
4631         * PCI Express Capability registers, so bus width
4632         * must be hardcoded.
4633         */
4634        if (bus->width == e1000_bus_width_unknown)
4635                bus->width = e1000_bus_width_pcie_x1;
4636
4637        return ret_val;
4638}
4639
4640/**
4641 *  e1000_reset_hw_ich8lan - Reset the hardware
4642 *  @hw: pointer to the HW structure
4643 *
4644 *  Does a full reset of the hardware which includes a reset of the PHY and
4645 *  MAC.
4646 **/
4647static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4648{
4649        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4650        u16 kum_cfg;
4651        u32 ctrl, reg;
4652        s32 ret_val;
4653
4654        /* Prevent the PCI-E bus from sticking if there is no TLP connection
4655         * on the last TLP read/write transaction when MAC is reset.
4656         */
4657        ret_val = e1000e_disable_pcie_master(hw);
4658        if (ret_val)
4659                e_dbg("PCI-E Master disable polling has failed.\n");
4660
4661        e_dbg("Masking off all interrupts\n");
4662        ew32(IMC, 0xffffffff);
4663
4664        /* Disable the Transmit and Receive units.  Then delay to allow
4665         * any pending transactions to complete before we hit the MAC
4666         * with the global reset.
4667         */
4668        ew32(RCTL, 0);
4669        ew32(TCTL, E1000_TCTL_PSP);
4670        e1e_flush();
4671
4672        usleep_range(10000, 20000);
4673
4674        /* Workaround for ICH8 bit corruption issue in FIFO memory */
4675        if (hw->mac.type == e1000_ich8lan) {
4676                /* Set Tx and Rx buffer allocation to 8k apiece. */
4677                ew32(PBA, E1000_PBA_8K);
4678                /* Set Packet Buffer Size to 16k. */
4679                ew32(PBS, E1000_PBS_16K);
4680        }
4681
4682        if (hw->mac.type == e1000_pchlan) {
4683                /* Save the NVM K1 bit setting */
4684                ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4685                if (ret_val)
4686                        return ret_val;
4687
4688                if (kum_cfg & E1000_NVM_K1_ENABLE)
4689                        dev_spec->nvm_k1_enabled = true;
4690                else
4691                        dev_spec->nvm_k1_enabled = false;
4692        }
4693
4694        ctrl = er32(CTRL);
4695
4696        if (!hw->phy.ops.check_reset_block(hw)) {
4697                /* Full-chip reset requires MAC and PHY reset at the same
4698                 * time to make sure the interface between MAC and the
4699                 * external PHY is reset.
4700                 */
4701                ctrl |= E1000_CTRL_PHY_RST;
4702
4703                /* Gate automatic PHY configuration by hardware on
4704                 * non-managed 82579
4705                 */
4706                if ((hw->mac.type == e1000_pch2lan) &&
4707                    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
4708                        e1000_gate_hw_phy_config_ich8lan(hw, true);
4709        }
4710        ret_val = e1000_acquire_swflag_ich8lan(hw);
4711        e_dbg("Issuing a global reset to ich8lan\n");
4712        ew32(CTRL, (ctrl | E1000_CTRL_RST));
4713        /* cannot issue a flush here because it hangs the hardware */
4714        msleep(20);
4715
4716        /* Set Phy Config Counter to 50msec */
4717        if (hw->mac.type == e1000_pch2lan) {
4718                reg = er32(FEXTNVM3);
4719                reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4720                reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4721                ew32(FEXTNVM3, reg);
4722        }
4723
4724        if (!ret_val)
4725                clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
4726
4727        if (ctrl & E1000_CTRL_PHY_RST) {
4728                ret_val = hw->phy.ops.get_cfg_done(hw);
4729                if (ret_val)
4730                        return ret_val;
4731
4732                ret_val = e1000_post_phy_reset_ich8lan(hw);
4733                if (ret_val)
4734                        return ret_val;
4735        }
4736
4737        /* For PCH, this write will make sure that any noise
4738         * will be detected as a CRC error and be dropped rather than show up
4739         * as a bad packet to the DMA engine.
4740         */
4741        if (hw->mac.type == e1000_pchlan)
4742                ew32(CRC_OFFSET, 0x65656565);
4743
4744        ew32(IMC, 0xffffffff);
4745        er32(ICR);
4746
4747        reg = er32(KABGTXD);
4748        reg |= E1000_KABGTXD_BGSQLBIAS;
4749        ew32(KABGTXD, reg);
4750
4751        return 0;
4752}
4753
4754/**
4755 *  e1000_init_hw_ich8lan - Initialize the hardware
4756 *  @hw: pointer to the HW structure
4757 *
4758 *  Prepares the hardware for transmit and receive by doing the following:
4759 *   - initialize hardware bits
4760 *   - initialize LED identification
4761 *   - setup receive address registers
4762 *   - setup flow control
4763 *   - setup transmit descriptors
4764 *   - clear statistics
4765 **/
4766static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4767{
4768        struct e1000_mac_info *mac = &hw->mac;
4769        u32 ctrl_ext, txdctl, snoop;
4770        s32 ret_val;
4771        u16 i;
4772
4773        e1000_initialize_hw_bits_ich8lan(hw);
4774
4775        /* Initialize identification LED */
4776        ret_val = mac->ops.id_led_init(hw);
4777        /* An error is not fatal and we should not stop init due to this */
4778        if (ret_val)
4779                e_dbg("Error initializing identification LED\n");
4780
4781        /* Setup the receive address. */
4782        e1000e_init_rx_addrs(hw, mac->rar_entry_count);
4783
4784        /* Zero out the Multicast HASH table */
4785        e_dbg("Zeroing the MTA\n");
4786        for (i = 0; i < mac->mta_reg_count; i++)
4787                E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4788
4789        /* The 82578 Rx buffer will stall if wakeup is enabled in both the
4790         * host and the ME.  Disable wakeup by clearing the host wakeup bit.
4791         * Reset the PHY after disabling host wakeup to reset the Rx buffer.
4792         */
4793        if (hw->phy.type == e1000_phy_82578) {
4794                e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
4795                i &= ~BM_WUC_HOST_WU_BIT;
4796                e1e_wphy(hw, BM_PORT_GEN_CFG, i);
4797                ret_val = e1000_phy_hw_reset_ich8lan(hw);
4798                if (ret_val)
4799                        return ret_val;
4800        }
4801
4802        /* Setup link and flow control */
4803        ret_val = mac->ops.setup_link(hw);
4804
4805        /* Set the transmit descriptor write-back policy for both queues */
4806        txdctl = er32(TXDCTL(0));
4807        txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4808                  E1000_TXDCTL_FULL_TX_DESC_WB);
4809        txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4810                  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4811        ew32(TXDCTL(0), txdctl);
4812        txdctl = er32(TXDCTL(1));
4813        txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4814                  E1000_TXDCTL_FULL_TX_DESC_WB);
4815        txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4816                  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4817        ew32(TXDCTL(1), txdctl);
4818
4819        /* ICH8 has opposite polarity of no_snoop bits.
4820         * By default, we should use snoop behavior.
4821         */
4822        if (mac->type == e1000_ich8lan)
4823                snoop = PCIE_ICH8_SNOOP_ALL;
4824        else
4825                snoop = (u32)~(PCIE_NO_SNOOP_ALL);
4826        e1000e_set_pcie_no_snoop(hw, snoop);
4827
4828        ctrl_ext = er32(CTRL_EXT);
4829        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4830        ew32(CTRL_EXT, ctrl_ext);
4831
4832        /* Clear all of the statistics registers (clear on read).  It is
4833         * important that we do this after we have tried to establish link
4834         * because the symbol error count will increment wildly if there
4835         * is no link.
4836         */
4837        e1000_clear_hw_cntrs_ich8lan(hw);
4838
4839        return ret_val;
4840}
4841
4842/**
4843 *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4844 *  @hw: pointer to the HW structure
4845 *
4846 *  Sets/Clears the hardware bits necessary for correctly setting up the
4847 *  hardware for transmit and receive.
4848 **/
4849static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4850{
4851        u32 reg;
4852
4853        /* Extended Device Control */
4854        reg = er32(CTRL_EXT);
4855        reg |= BIT(22);
4856        /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4857        if (hw->mac.type >= e1000_pchlan)
4858                reg |= E1000_CTRL_EXT_PHYPDEN;
4859        ew32(CTRL_EXT, reg);
4860
4861        /* Transmit Descriptor Control 0 */
4862        reg = er32(TXDCTL(0));
4863        reg |= BIT(22);
4864        ew32(TXDCTL(0), reg);
4865
4866        /* Transmit Descriptor Control 1 */
4867        reg = er32(TXDCTL(1));
4868        reg |= BIT(22);
4869        ew32(TXDCTL(1), reg);
4870
4871        /* Transmit Arbitration Control 0 */
4872        reg = er32(TARC(0));
4873        if (hw->mac.type == e1000_ich8lan)
4874                reg |= BIT(28) | BIT(29);
4875        reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27);
4876        ew32(TARC(0), reg);
4877
4878        /* Transmit Arbitration Control 1 */
4879        reg = er32(TARC(1));
4880        if (er32(TCTL) & E1000_TCTL_MULR)
4881                reg &= ~BIT(28);
4882        else
4883                reg |= BIT(28);
4884        reg |= BIT(24) | BIT(26) | BIT(30);
4885        ew32(TARC(1), reg);
4886
4887        /* Device Status */
4888        if (hw->mac.type == e1000_ich8lan) {
4889                reg = er32(STATUS);
4890                reg &= ~BIT(31);
4891                ew32(STATUS, reg);
4892        }
4893
4894        /* Work around a descriptor data corruption issue seen with NFSv2
4895         * UDP traffic by simply disabling the NFS filtering capability.
4896         */
4897        reg = er32(RFCTL);
4898        reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4899
4900        /* Disable IPv6 extension header parsing because some malformed
4901         * IPv6 headers can hang the Rx.
4902         */
4903        if (hw->mac.type == e1000_ich8lan)
4904                reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4905        ew32(RFCTL, reg);
4906
4907        /* Enable ECC on Lynxpoint */
4908        if (hw->mac.type >= e1000_pch_lpt) {
4909                reg = er32(PBECCSTS);
4910                reg |= E1000_PBECCSTS_ECC_ENABLE;
4911                ew32(PBECCSTS, reg);
4912
4913                reg = er32(CTRL);
4914                reg |= E1000_CTRL_MEHE;
4915                ew32(CTRL, reg);
4916        }
4917}
4918
4919/**
4920 *  e1000_setup_link_ich8lan - Setup flow control and link settings
4921 *  @hw: pointer to the HW structure
4922 *
4923 *  Determines which flow control settings to use, then configures flow
4924 *  control.  Calls the appropriate media-specific link configuration
4925 *  function.  Assuming the adapter has a valid link partner, a valid link
4926 *  should be established.  Assumes the hardware has previously been reset
4927 *  and the transmitter and receiver are not enabled.
4928 **/
4929static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4930{
4931        s32 ret_val;
4932
4933        if (hw->phy.ops.check_reset_block(hw))
4934                return 0;
4935
4936        /* ICH parts do not have a word in the NVM to determine
4937         * the default flow control setting, so we explicitly
4938         * set it to full.
4939         */
4940        if (hw->fc.requested_mode == e1000_fc_default) {
4941                /* Workaround h/w hang when Tx flow control enabled */
4942                if (hw->mac.type == e1000_pchlan)
4943                        hw->fc.requested_mode = e1000_fc_rx_pause;
4944                else
4945                        hw->fc.requested_mode = e1000_fc_full;
4946        }
4947
4948        /* Save off the requested flow control mode for use later.  Depending
4949         * on the link partner's capabilities, we may or may not use this mode.
4950         */
4951        hw->fc.current_mode = hw->fc.requested_mode;
4952
4953        e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
4954
4955        /* Continue to configure the copper link. */
4956        ret_val = hw->mac.ops.setup_physical_interface(hw);
4957        if (ret_val)
4958                return ret_val;
4959
4960        ew32(FCTTV, hw->fc.pause_time);
4961        if ((hw->phy.type == e1000_phy_82578) ||
4962            (hw->phy.type == e1000_phy_82579) ||
4963            (hw->phy.type == e1000_phy_i217) ||
4964            (hw->phy.type == e1000_phy_82577)) {
4965                ew32(FCRTV_PCH, hw->fc.refresh_time);
4966
4967                ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
4968                                   hw->fc.pause_time);
4969                if (ret_val)
4970                        return ret_val;
4971        }
4972
4973        return e1000e_set_fc_watermarks(hw);
4974}
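
/* Example of the default selection above (a reading aid only): on pchlan
 * parts with fc.requested_mode left at e1000_fc_default, the driver asks
 * for e1000_fc_rx_pause to dodge the Tx flow control hang, while all other
 * ICH/PCH parts ask for e1000_fc_full.
 */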
4975
4976/**
4977 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4978 *  @hw: pointer to the HW structure
4979 *
4980 *  Configures the Kumeran interface to the PHY to wait the appropriate time
4981 *  when polling the PHY, then calls the generic setup_copper_link to finish
4982 *  configuring the copper link.
4983 **/
4984static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4985{
4986        u32 ctrl;
4987        s32 ret_val;
4988        u16 reg_data;
4989
4990        ctrl = er32(CTRL);
4991        ctrl |= E1000_CTRL_SLU;
4992        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4993        ew32(CTRL, ctrl);
4994
4995        /* Set the MAC to wait the maximum time between each iteration
4996         * and increase the max iterations when polling the PHY;
4997         * this fixes erroneous timeouts at 10Mbps.
4998         */
4999        ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
5000        if (ret_val)
5001                return ret_val;
5002        ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
5003                                       &reg_data);
5004        if (ret_val)
5005                return ret_val;
5006        reg_data |= 0x3F;
5007        ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
5008                                        reg_data);
5009        if (ret_val)
5010                return ret_val;
5011
5012        switch (hw->phy.type) {
5013        case e1000_phy_igp_3:
5014                ret_val = e1000e_copper_link_setup_igp(hw);
5015                if (ret_val)
5016                        return ret_val;
5017                break;
5018        case e1000_phy_bm:
5019        case e1000_phy_82578:
5020                ret_val = e1000e_copper_link_setup_m88(hw);
5021                if (ret_val)
5022                        return ret_val;
5023                break;
5024        case e1000_phy_82577:
5025        case e1000_phy_82579:
5026                ret_val = e1000_copper_link_setup_82577(hw);
5027                if (ret_val)
5028                        return ret_val;
5029                break;
5030        case e1000_phy_ife:
5031                ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
5032                if (ret_val)
5033                        return ret_val;
5034
5035                reg_data &= ~IFE_PMC_AUTO_MDIX;
5036
5037                switch (hw->phy.mdix) {
5038                case 1:
5039                        reg_data &= ~IFE_PMC_FORCE_MDIX;
5040                        break;
5041                case 2:
5042                        reg_data |= IFE_PMC_FORCE_MDIX;
5043                        break;
5044                case 0:
5045                default:
5046                        reg_data |= IFE_PMC_AUTO_MDIX;
5047                        break;
5048                }
5049                ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
5050                if (ret_val)
5051                        return ret_val;
5052                break;
5053        default:
5054                break;
5055        }
5056
5057        return e1000e_setup_copper_link(hw);
5058}
5059
5060/**
5061 *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5062 *  @hw: pointer to the HW structure
5063 *
5064 *  Calls the PHY-specific link setup function and then calls the
5065 *  generic setup_copper_link to finish configuring the link for
5066 *  Lynxpoint PCH devices.
5067 **/
5068static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5069{
5070        u32 ctrl;
5071        s32 ret_val;
5072
5073        ctrl = er32(CTRL);
5074        ctrl |= E1000_CTRL_SLU;
5075        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5076        ew32(CTRL, ctrl);
5077
5078        ret_val = e1000_copper_link_setup_82577(hw);
5079        if (ret_val)
5080                return ret_val;
5081
5082        return e1000e_setup_copper_link(hw);
5083}
5084
5085/**
5086 *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5087 *  @hw: pointer to the HW structure
5088 *  @speed: pointer to store current link speed
5089 *  @duplex: pointer to store the current link duplex
5090 *
5091 *  Calls the generic get_speed_and_duplex to retrieve the current link
5092 *  information and then calls the Kumeran lock loss workaround for links at
5093 *  gigabit speeds.
5094 **/
5095static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5096                                          u16 *duplex)
5097{
5098        s32 ret_val;
5099
5100        ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
5101        if (ret_val)
5102                return ret_val;
5103
5104        if ((hw->mac.type == e1000_ich8lan) &&
5105            (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
5106                ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5107        }
5108
5109        return ret_val;
5110}
5111
5112/**
5113 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5114 *  @hw: pointer to the HW structure
5115 *
5116 *  Work-around for 82566 Kumeran PCS lock loss:
5117 *  On link status change (e.g. PCI reset, speed change), when link is up
5118 *  at gigabit speed:
5119 *    0) if the workaround has been disabled, do nothing
5120 *    1) wait 1ms for Kumeran link to come up
5121 *    2) check Kumeran Diagnostic register PCS lock loss bit
5122 *    3) if not set the link is locked (all is good), otherwise...
5123 *    4) reset the PHY
5124 *    5) repeat up to 10 times
5125 *  Note: this is only called for IGP3 copper when speed is 1gb.
5126 **/
5127static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5128{
5129        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5130        u32 phy_ctrl;
5131        s32 ret_val;
5132        u16 i, data;
5133        bool link;
5134
5135        if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5136                return 0;
5137
5138        /* Make sure link is up before proceeding.  If not, just return.
5139         * Attempting this while link is negotiating fouls up link
5140         * stability.
5141         */
5142        ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
5143        if (!link)
5144                return 0;
5145
5146        for (i = 0; i < 10; i++) {
5147                /* read once to clear */
5148                ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5149                if (ret_val)
5150                        return ret_val;
5151                /* and again to get new status */
5152                ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5153                if (ret_val)
5154                        return ret_val;
5155
5156                /* check for PCS lock */
5157                if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5158                        return 0;
5159
5160                /* Issue PHY reset */
5161                e1000_phy_hw_reset(hw);
5162                mdelay(5);
5163        }
5164        /* Disable GigE link negotiation */
5165        phy_ctrl = er32(PHY_CTRL);
5166        phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5167                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5168        ew32(PHY_CTRL, phy_ctrl);
5169
5170        /* Call gig speed drop workaround on Gig disable before accessing
5171         * any PHY registers
5172         */
5173        e1000e_gig_downshift_workaround_ich8lan(hw);
5174
5175        /* unable to acquire PCS lock */
5176        return -E1000_ERR_PHY;
5177}
5178
5179/**
5180 *  e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5181 *  @hw: pointer to the HW structure
5182 *  @state: boolean value used to set the current Kumeran workaround state
5183 *
5184 *  If ICH8, set the current Kumeran workaround state (enabled - true
5185 *  /disabled - false).
5186 **/
5187void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5188                                                  bool state)
5189{
5190        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5191
5192        if (hw->mac.type != e1000_ich8lan) {
5193                e_dbg("Workaround applies to ICH8 only.\n");
5194                return;
5195        }
5196
5197        dev_spec->kmrn_lock_loss_workaround_enabled = state;
5198}
5199
5200/**
5201 *  e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5202 *  @hw: pointer to the HW structure
5203 *
5204 *  Workaround for 82566 power-down on D3 entry:
5205 *    1) disable gigabit link
5206 *    2) write VR power-down enable
5207 *    3) read it back
5208 *  Continue if successful, else issue LCD reset and repeat
5209 **/
5210void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5211{
5212        u32 reg;
5213        u16 data;
5214        u8 retry = 0;
5215
5216        if (hw->phy.type != e1000_phy_igp_3)
5217                return;
5218
5219        /* Try the workaround twice (if needed) */
5220        do {
5221                /* Disable link */
5222                reg = er32(PHY_CTRL);
5223                reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5224                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5225                ew32(PHY_CTRL, reg);
5226
5227                /* Call gig speed drop workaround on Gig disable before
5228                 * accessing any PHY registers
5229                 */
5230                if (hw->mac.type == e1000_ich8lan)
5231                        e1000e_gig_downshift_workaround_ich8lan(hw);
5232
5233                /* Write VR power-down enable */
5234                e1e_rphy(hw, IGP3_VR_CTRL, &data);
5235                data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5236                e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5237
5238                /* Read it back and test */
5239                e1e_rphy(hw, IGP3_VR_CTRL, &data);
5240                data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5241                if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5242                        break;
5243
5244                /* Issue PHY reset and repeat at most one more time */
5245                reg = er32(CTRL);
5246                ew32(CTRL, reg | E1000_CTRL_PHY_RST);
5247                retry++;
5248        } while (retry);
5249}
5250
5251/**
5252 *  e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5253 *  @hw: pointer to the HW structure
5254 *
5255 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
5256 *  LPLU, Gig disable, MDIC PHY reset):
5257 *    1) Set Kumeran Near-end loopback
5258 *    2) Clear Kumeran Near-end loopback
5259 *  Should only be called for ICH8[m] devices with any 1G Phy.
5260 **/
5261void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5262{
5263        s32 ret_val;
5264        u16 reg_data;
5265
5266        if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
5267                return;
5268
5269        ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5270                                       &reg_data);
5271        if (ret_val)
5272                return;
5273        reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5274        ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5275                                        reg_data);
5276        if (ret_val)
5277                return;
5278        reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5279        e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
5280}
5281
5282/**
5283 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5284 *  @hw: pointer to the HW structure
5285 *
5286 *  During S0 to Sx transition, it is possible the link remains at gig
5287 *  instead of negotiating to a lower speed.  Before going to Sx, set
5288 *  'Gig Disable' to force link speed negotiation to a lower speed based on
5289 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5290 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5291 *  needs to be written.
5292 *  Parts that support (and are linked to a partner which supports) EEE at
5293 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5294 *  than 10Mbps w/o EEE.
5295 **/
5296void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5297{
5298        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5299        u32 phy_ctrl;
5300        s32 ret_val;
5301
5302        phy_ctrl = er32(PHY_CTRL);
5303        phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5304
5305        if (hw->phy.type == e1000_phy_i217) {
5306                u16 phy_reg, device_id = hw->adapter->pdev->device;
5307
5308                if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5309                    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5310                    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5311                    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5312                    (hw->mac.type >= e1000_pch_spt)) {
5313                        u32 fextnvm6 = er32(FEXTNVM6);
5314
5315                        ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5316                }
5317
5318                ret_val = hw->phy.ops.acquire(hw);
5319                if (ret_val)
5320                        goto out;
5321
5322                if (!dev_spec->eee_disable) {
5323                        u16 eee_advert;
5324
5325                        ret_val =
5326                            e1000_read_emi_reg_locked(hw,
5327                                                      I217_EEE_ADVERTISEMENT,
5328                                                      &eee_advert);
5329                        if (ret_val)
5330                                goto release;
5331
5332                        /* Disable LPLU if both link partners support 100BaseT
5333                         * EEE and 100Full is advertised on both ends of the
5334                         * link, and enable Auto Enable LPI since there will
5335                         * be no driver to enable LPI while in Sx.
5336                         */
5337                        if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5338                            (dev_spec->eee_lp_ability &
5339                             I82579_EEE_100_SUPPORTED) &&
5340                            (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5341                                phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5342                                              E1000_PHY_CTRL_NOND0A_LPLU);
5343
5344                                /* Set Auto Enable LPI after link up */
5345                                e1e_rphy_locked(hw,
5346                                                I217_LPI_GPIO_CTRL, &phy_reg);
5347                                phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5348                                e1e_wphy_locked(hw,
5349                                                I217_LPI_GPIO_CTRL, phy_reg);
5350                        }
5351                }
5352
5353                /* For i217 Intel Rapid Start Technology support,
5354                 * when the system is going into Sx and no manageability engine
5355                 * is present, the driver must configure proxy to reset only on
5356                 * power good.  LPI (Low Power Idle) state must also reset only
5357                 * on power good, as well as the MTA (Multicast table array).
5358                 * The SMBus release must also be disabled on LCD reset.
5359                 */
5360                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5361                        /* Enable proxy to reset only on power good. */
5362                        e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
5363                        phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5364                        e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
5365
5366                        /* Set bit enable LPI (EEE) to reset only on
5367                         * power good.
5368                         */
5369                        e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
5370                        phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5371                        e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
5372
5373                        /* Disable the SMB release on LCD reset. */
5374                        e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5375                        phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5376                        e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5377                }
5378
5379                /* Enable MTA to reset for Intel Rapid Start Technology
5380                 * Support
5381                 */
5382                e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5383                phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5384                e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5385
5386release:
5387                hw->phy.ops.release(hw);
5388        }
5389out:
5390        ew32(PHY_CTRL, phy_ctrl);
5391
5392        if (hw->mac.type == e1000_ich8lan)
5393                e1000e_gig_downshift_workaround_ich8lan(hw);
5394
5395        if (hw->mac.type >= e1000_pchlan) {
5396                e1000_oem_bits_config_ich8lan(hw, false);
5397
5398                /* Reset PHY to activate OEM bits on 82577/8 */
5399                if (hw->mac.type == e1000_pchlan)
5400                        e1000e_phy_hw_reset_generic(hw);
5401
5402                ret_val = hw->phy.ops.acquire(hw);
5403                if (ret_val)
5404                        return;
5405                e1000_write_smbus_addr(hw);
5406                hw->phy.ops.release(hw);
5407        }
5408}
5409
5410/**
5411 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5412 *  @hw: pointer to the HW structure
5413 *
5414 *  During Sx to S0 transitions on non-managed devices or managed devices
5415 *  on which PHY resets are not blocked, if the PHY registers cannot be
5416 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5417 *  the PHY.
5418 *  On i217, setup Intel Rapid Start Technology.
5419 **/
5420void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5421{
5422        s32 ret_val;
5423
5424        if (hw->mac.type < e1000_pch2lan)
5425                return;
5426
5427        ret_val = e1000_init_phy_workarounds_pchlan(hw);
5428        if (ret_val) {
5429                e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
5430                return;
5431        }
5432
5433        /* For i217 Intel Rapid Start Technology support, when the system
5434         * is transitioning from Sx and no manageability engine is present,
5435         * configure SMBus to restore on reset, disable proxy, and enable
5436         * the reset on MTA (Multicast table array).
5437         */
5438        if (hw->phy.type == e1000_phy_i217) {
5439                u16 phy_reg;
5440
5441                ret_val = hw->phy.ops.acquire(hw);
5442                if (ret_val) {
5443                        e_dbg("Failed to setup iRST\n");
5444                        return;
5445                }
5446
5447                /* Clear Auto Enable LPI after link up */
5448                e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5449                phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5450                e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5451
5452                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5453                        /* Restore clear on SMB if no manageability engine
5454                         * is present
5455                         */
5456                        ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5457                        if (ret_val)
5458                                goto release;
5459                        phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5460                        e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5461
5462                        /* Disable Proxy */
5463                        e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
5464                }
5465                /* Enable reset on MTA */
5466                ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5467                if (ret_val)
5468                        goto release;
5469                phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5470                e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5471release:
5472                if (ret_val)
5473                        e_dbg("Error %d in resume workarounds\n", ret_val);
5474                hw->phy.ops.release(hw);
5475        }
5476}
5477
5478/**
5479 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5480 *  @hw: pointer to the HW structure
5481 *
5482 *  Return the LED back to the default configuration.
5483 **/
5484static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5485{
5486        if (hw->phy.type == e1000_phy_ife)
5487                return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
5488
5489        ew32(LEDCTL, hw->mac.ledctl_default);
5490        return 0;
5491}
5492
5493/**
5494 *  e1000_led_on_ich8lan - Turn LEDs on
5495 *  @hw: pointer to the HW structure
5496 *
5497 *  Turn on the LEDs.
5498 **/
5499static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5500{
5501        if (hw->phy.type == e1000_phy_ife)
5502                return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5503                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5504
5505        ew32(LEDCTL, hw->mac.ledctl_mode2);
5506        return 0;
5507}
5508
5509/**
5510 *  e1000_led_off_ich8lan - Turn LEDs off
5511 *  @hw: pointer to the HW structure
5512 *
5513 *  Turn off the LEDs.
5514 **/
5515static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5516{
5517        if (hw->phy.type == e1000_phy_ife)
5518                return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5519                                (IFE_PSCL_PROBE_MODE |
5520                                 IFE_PSCL_PROBE_LEDS_OFF));
5521
5522        ew32(LEDCTL, hw->mac.ledctl_mode1);
5523        return 0;
5524}
5525
5526/**
5527 *  e1000_setup_led_pchlan - Configures SW controllable LED
5528 *  @hw: pointer to the HW structure
5529 *
5530 *  This prepares the SW controllable LED for use.
5531 **/
5532static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5533{
5534        return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
5535}
5536
5537/**
5538 *  e1000_cleanup_led_pchlan - Restore the default LED operation
5539 *  @hw: pointer to the HW structure
5540 *
5541 *  Return the LED back to the default configuration.
5542 **/
5543static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5544{
5545        return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
5546}
5547
5548/**
5549 *  e1000_led_on_pchlan - Turn LEDs on
5550 *  @hw: pointer to the HW structure
5551 *
5552 *  Turn on the LEDs.
5553 **/
5554static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5555{
5556        u16 data = (u16)hw->mac.ledctl_mode2;
5557        u32 i, led;
5558
5559        /* If there is no link, turn the LED on by setting the invert bit
5560         * for each LED whose mode is "link_up" in ledctl_mode2.
5561         */
5562        if (!(er32(STATUS) & E1000_STATUS_LU)) {
5563                for (i = 0; i < 3; i++) {
5564                        led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5565                        if ((led & E1000_PHY_LED0_MODE_MASK) !=
5566                            E1000_LEDCTL_MODE_LINK_UP)
5567                                continue;
5568                        if (led & E1000_PHY_LED0_IVRT)
5569                                data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5570                        else
5571                                data |= (E1000_PHY_LED0_IVRT << (i * 5));
5572                }
5573        }
5574
5575        return e1e_wphy(hw, HV_LED_CONFIG, data);
5576}
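
/* Worked example (illustrative): suppose LED 0's 5-bit field in
 * ledctl_mode2 is "link_up" with the invert bit clear.  With no link that
 * LED would stay dark, so the loop above flips its invert bit:
 *
 *      led = data & E1000_PHY_LED0_MASK;     // LED 0 field, bits 4:0
 *      // mode == E1000_LEDCTL_MODE_LINK_UP and IVRT clear, so:
 *      data |= E1000_PHY_LED0_IVRT;          // LED now lights without link
 *
 * e1000_led_off_pchlan() applies the same toggle to ledctl_mode1 so the
 * LED can be turned back off while link is still down.
 */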
5577
5578/**
5579 *  e1000_led_off_pchlan - Turn LEDs off
5580 *  @hw: pointer to the HW structure
5581 *
5582 *  Turn off the LEDs.
5583 **/
5584static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5585{
5586        u16 data = (u16)hw->mac.ledctl_mode1;
5587        u32 i, led;
5588
5589        /* If there is no link, turn the LED off by clearing the invert bit
5590         * for each LED whose mode is "link_up" in ledctl_mode1.
5591         */
5592        if (!(er32(STATUS) & E1000_STATUS_LU)) {
5593                for (i = 0; i < 3; i++) {
5594                        led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5595                        if ((led & E1000_PHY_LED0_MODE_MASK) !=
5596                            E1000_LEDCTL_MODE_LINK_UP)
5597                                continue;
5598                        if (led & E1000_PHY_LED0_IVRT)
5599                                data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5600                        else
5601                                data |= (E1000_PHY_LED0_IVRT << (i * 5));
5602                }
5603        }
5604
5605        return e1e_wphy(hw, HV_LED_CONFIG, data);
5606}
5607
5608/**
5609 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5610 *  @hw: pointer to the HW structure
5611 *
5612 *  Read the appropriate register for the config done bit to get completion
5613 *  status, and configure the PHY through s/w for EEPROM-less parts.
5614 *
5615 *  NOTE: some EEPROM-less silicon will fail when trying to read the config
5616 *  done bit, so only an error is logged and execution continues.  If we
5617 *  returned an error instead, EEPROM-less silicon could not be reset or
5618 *  change link.
5619 **/
5620static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5621{
5622        s32 ret_val = 0;
5623        u32 bank = 0;
5624        u32 status;
5625
5626        e1000e_get_cfg_done_generic(hw);
5627
5628        /* Wait for indication from h/w that it has completed basic config */
5629        if (hw->mac.type >= e1000_ich10lan) {
5630                e1000_lan_init_done_ich8lan(hw);
5631        } else {
5632                ret_val = e1000e_get_auto_rd_done(hw);
5633                if (ret_val) {
5634                        /* When auto config read does not complete, do not
5635                         * return with an error. This can happen in situations
5636                         * where there is no eeprom and prevents getting link.
5637                         */
5638                        e_dbg("Auto Read Done did not complete\n");
5639                        ret_val = 0;
5640                }
5641        }
5642
5643        /* Clear PHY Reset Asserted bit */
5644        status = er32(STATUS);
5645        if (status & E1000_STATUS_PHYRA)
5646                ew32(STATUS, status & ~E1000_STATUS_PHYRA);
5647        else
5648                e_dbg("PHY Reset Asserted not set - needs delay\n");
5649
5650        /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5651        if (hw->mac.type <= e1000_ich9lan) {
5652                if (!(er32(EECD) & E1000_EECD_PRES) &&
5653                    (hw->phy.type == e1000_phy_igp_3)) {
5654                        e1000e_phy_init_script_igp3(hw);
5655                }
5656        } else {
5657                if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5658                        /* Maybe we should do a basic PHY config */
5659                        e_dbg("EEPROM not present\n");
5660                        ret_val = -E1000_ERR_CONFIG;
5661                }
5662        }
5663
5664        return ret_val;
5665}
5666
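/* A minimal sketch of the "needs delay" case noted above (an assumption, not
 * upstream code): poll STATUS until the PHY Reset Asserted bit shows up, then
 * clear it.  The retry count and usleep_range() window are illustrative
 * values only.
 */
static void __maybe_unused e1000_wait_phyra_example(struct e1000_hw *hw)
{
        u32 status;
        int i;

        for (i = 0; i < 10; i++) {
                status = er32(STATUS);
                if (status & E1000_STATUS_PHYRA) {
                        ew32(STATUS, status & ~E1000_STATUS_PHYRA);
                        return;
                }
                usleep_range(1000, 2000);
        }
        e_dbg("PHY Reset Asserted was never set\n");
}
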
5667/**
5668 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5669 * @hw: pointer to the HW structure
5670 *
5671 * Remove the link when the PHY is powered down to save power, when link is
5672 * turned off during a driver unload, or when wake on LAN is not enabled.
5673 **/
5674static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5675{
5676        /* Power down unless manageability or a PHY reset block is active */
5677        if (!(hw->mac.ops.check_mng_mode(hw) ||
5678              hw->phy.ops.check_reset_block(hw)))
5679                e1000_power_down_phy_copper(hw);
5680}
5681
5682/**
5683 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5684 *  @hw: pointer to the HW structure
5685 *
5686 *  Clears hardware counters specific to the silicon family and calls
5687 *  e1000e_clear_hw_cntrs_base to clear all general purpose counters.
5688 **/
5689static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5690{
5691        u16 phy_data;
5692        s32 ret_val;
5693
5694        e1000e_clear_hw_cntrs_base(hw);
5695
5696        er32(ALGNERRC);
5697        er32(RXERRC);
5698        er32(TNCRS);
5699        er32(CEXTERR);
5700        er32(TSCTC);
5701        er32(TSCTFC);
5702
5703        er32(MGTPRC);
5704        er32(MGTPDC);
5705        er32(MGTPTC);
5706
5707        er32(IAC);
5708        er32(ICRXOC);
5709
5710        /* Clear PHY statistics registers */
5711        if ((hw->phy.type == e1000_phy_82578) ||
5712            (hw->phy.type == e1000_phy_82579) ||
5713            (hw->phy.type == e1000_phy_i217) ||
5714            (hw->phy.type == e1000_phy_82577)) {
5715                ret_val = hw->phy.ops.acquire(hw);
5716                if (ret_val)
5717                        return;
5718                ret_val = hw->phy.ops.set_page(hw,
5719                                               HV_STATS_PAGE << IGP_PAGE_SHIFT);
5720                if (ret_val)
5721                        goto release;
5722                hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5723                hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5724                hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5725                hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5726                hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5727                hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5728                hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5729                hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5730                hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5731                hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5732                hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5733                hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5734                hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5735                hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5736release:
5737                hw->phy.ops.release(hw);
5738        }
5739}
5740
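/* Illustrative sketch (assumes, as the UPPER/LOWER register names suggest,
 * that each PHY statistic is a 32-bit counter split across two 16-bit halves,
 * and that the caller already holds the PHY and has selected HV_STATS_PAGE as
 * the function above does): reading one counter back as a single value.  The
 * helper name is hypothetical and read errors are ignored exactly as in the
 * clear loop above.
 */
static u32 __maybe_unused e1000_read_phy_stat_example(struct e1000_hw *hw,
                                                      u32 upper_reg,
                                                      u32 lower_reg)
{
        u16 upper = 0, lower = 0;

        hw->phy.ops.read_reg_page(hw, upper_reg, &upper);
        hw->phy.ops.read_reg_page(hw, lower_reg, &lower);

        return ((u32)upper << 16) | lower;
}
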
5741static const struct e1000_mac_operations ich8_mac_ops = {
5742        /* check_mng_mode dependent on mac type */
5743        .check_for_link         = e1000_check_for_copper_link_ich8lan,
5744        /* cleanup_led dependent on mac type */
5745        .clear_hw_cntrs         = e1000_clear_hw_cntrs_ich8lan,
5746        .get_bus_info           = e1000_get_bus_info_ich8lan,
5747        .set_lan_id             = e1000_set_lan_id_single_port,
5748        .get_link_up_info       = e1000_get_link_up_info_ich8lan,
5749        /* led_on dependent on mac type */
5750        /* led_off dependent on mac type */
5751        .update_mc_addr_list    = e1000e_update_mc_addr_list_generic,
5752        .reset_hw               = e1000_reset_hw_ich8lan,
5753        .init_hw                = e1000_init_hw_ich8lan,
5754        .setup_link             = e1000_setup_link_ich8lan,
5755        .setup_physical_interface = e1000_setup_copper_link_ich8lan,
5756        /* id_led_init dependent on mac type */
5757        .config_collision_dist  = e1000e_config_collision_dist_generic,
5758        .rar_set                = e1000e_rar_set_generic,
5759        .rar_get_count          = e1000e_rar_get_count_generic,
5760};
5761
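/* A minimal sketch of how the "dependent on mac type" hooks left out of
 * ich8_mac_ops might be filled in.  This is an assumption about what
 * e1000_get_variants_ich8lan does (including the e1000_pchlan comparison),
 * shown only to make the split between the shared table above and the
 * per-MAC LED routines explicit.
 */
static void __maybe_unused e1000_fill_led_ops_example(struct e1000_hw *hw)
{
        if (hw->mac.type >= e1000_pchlan) {
                hw->mac.ops.setup_led = e1000_setup_led_pchlan;
                hw->mac.ops.cleanup_led = e1000_cleanup_led_pchlan;
                hw->mac.ops.led_on = e1000_led_on_pchlan;
                hw->mac.ops.led_off = e1000_led_off_pchlan;
        } else {
                hw->mac.ops.cleanup_led = e1000_cleanup_led_ich8lan;
                hw->mac.ops.led_on = e1000_led_on_ich8lan;
                hw->mac.ops.led_off = e1000_led_off_ich8lan;
        }
}
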
5762static const struct e1000_phy_operations ich8_phy_ops = {
5763        .acquire                = e1000_acquire_swflag_ich8lan,
5764        .check_reset_block      = e1000_check_reset_block_ich8lan,
5765        .commit                 = NULL,
5766        .get_cfg_done           = e1000_get_cfg_done_ich8lan,
5767        .get_cable_length       = e1000e_get_cable_length_igp_2,
5768        .read_reg               = e1000e_read_phy_reg_igp,
5769        .release                = e1000_release_swflag_ich8lan,
5770        .reset                  = e1000_phy_hw_reset_ich8lan,
5771        .set_d0_lplu_state      = e1000_set_d0_lplu_state_ich8lan,
5772        .set_d3_lplu_state      = e1000_set_d3_lplu_state_ich8lan,
5773        .write_reg              = e1000e_write_phy_reg_igp,
5774};
5775
5776static const struct e1000_nvm_operations ich8_nvm_ops = {
5777        .acquire                = e1000_acquire_nvm_ich8lan,
5778        .read                   = e1000_read_nvm_ich8lan,
5779        .release                = e1000_release_nvm_ich8lan,
5780        .reload                 = e1000e_reload_nvm_generic,
5781        .update                 = e1000_update_nvm_checksum_ich8lan,
5782        .valid_led_default      = e1000_valid_led_default_ich8lan,
5783        .validate               = e1000_validate_nvm_checksum_ich8lan,
5784        .write                  = e1000_write_nvm_ich8lan,
5785};
5786
5787static const struct e1000_nvm_operations spt_nvm_ops = {
5788        .acquire                = e1000_acquire_nvm_ich8lan,
5789        .release                = e1000_release_nvm_ich8lan,
5790        .read                   = e1000_read_nvm_spt,
5791        .update                 = e1000_update_nvm_checksum_spt,
5792        .reload                 = e1000e_reload_nvm_generic,
5793        .valid_led_default      = e1000_valid_led_default_ich8lan,
5794        .validate               = e1000_validate_nvm_checksum_ich8lan,
5795        .write                  = e1000_write_nvm_ich8lan,
5796};
5797
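/* A minimal usage sketch (assumption): NVM accesses go through the per-family
 * hook tables above rather than through the ich8lan/spt routines directly.
 * The (hw, offset, words, data) argument order mirrors the reads performed
 * elsewhere in the driver.
 */
static s32 __maybe_unused e1000_read_nvm_word_example(struct e1000_hw *hw,
                                                      u16 offset, u16 *data)
{
        /* read a single 16-bit word at the given offset */
        return hw->nvm.ops.read(hw, offset, 1, data);
}
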
5798const struct e1000_info e1000_ich8_info = {
5799        .mac                    = e1000_ich8lan,
5800        .flags                  = FLAG_HAS_WOL
5801                                  | FLAG_IS_ICH
5802                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5803                                  | FLAG_HAS_AMT
5804                                  | FLAG_HAS_FLASH
5805                                  | FLAG_APME_IN_WUC,
5806        .pba                    = 8,
5807        .max_hw_frame_size      = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
5808        .get_variants           = e1000_get_variants_ich8lan,
5809        .mac_ops                = &ich8_mac_ops,
5810        .phy_ops                = &ich8_phy_ops,
5811        .nvm_ops                = &ich8_nvm_ops,
5812};
5813
5814const struct e1000_info e1000_ich9_info = {
5815        .mac                    = e1000_ich9lan,
5816        .flags                  = FLAG_HAS_JUMBO_FRAMES
5817                                  | FLAG_IS_ICH
5818                                  | FLAG_HAS_WOL
5819                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5820                                  | FLAG_HAS_AMT
5821                                  | FLAG_HAS_FLASH
5822                                  | FLAG_APME_IN_WUC,
5823        .pba                    = 18,
5824        .max_hw_frame_size      = DEFAULT_JUMBO,
5825        .get_variants           = e1000_get_variants_ich8lan,
5826        .mac_ops                = &ich8_mac_ops,
5827        .phy_ops                = &ich8_phy_ops,
5828        .nvm_ops                = &ich8_nvm_ops,
5829};
5830
5831const struct e1000_info e1000_ich10_info = {
5832        .mac                    = e1000_ich10lan,
5833        .flags                  = FLAG_HAS_JUMBO_FRAMES
5834                                  | FLAG_IS_ICH
5835                                  | FLAG_HAS_WOL
5836                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5837                                  | FLAG_HAS_AMT
5838                                  | FLAG_HAS_FLASH
5839                                  | FLAG_APME_IN_WUC,
5840        .pba                    = 18,
5841        .max_hw_frame_size      = DEFAULT_JUMBO,
5842        .get_variants           = e1000_get_variants_ich8lan,
5843        .mac_ops                = &ich8_mac_ops,
5844        .phy_ops                = &ich8_phy_ops,
5845        .nvm_ops                = &ich8_nvm_ops,
5846};
5847
5848const struct e1000_info e1000_pch_info = {
5849        .mac                    = e1000_pchlan,
5850        .flags                  = FLAG_IS_ICH
5851                                  | FLAG_HAS_WOL
5852                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5853                                  | FLAG_HAS_AMT
5854                                  | FLAG_HAS_FLASH
5855                                  | FLAG_HAS_JUMBO_FRAMES
5856                                  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
5857                                  | FLAG_APME_IN_WUC,
5858        .flags2                 = FLAG2_HAS_PHY_STATS,
5859        .pba                    = 26,
5860        .max_hw_frame_size      = 4096,
5861        .get_variants           = e1000_get_variants_ich8lan,
5862        .mac_ops                = &ich8_mac_ops,
5863        .phy_ops                = &ich8_phy_ops,
5864        .nvm_ops                = &ich8_nvm_ops,
5865};
5866
5867const struct e1000_info e1000_pch2_info = {
5868        .mac                    = e1000_pch2lan,
5869        .flags                  = FLAG_IS_ICH
5870                                  | FLAG_HAS_WOL
5871                                  | FLAG_HAS_HW_TIMESTAMP
5872                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5873                                  | FLAG_HAS_AMT
5874                                  | FLAG_HAS_FLASH
5875                                  | FLAG_HAS_JUMBO_FRAMES
5876                                  | FLAG_APME_IN_WUC,
5877        .flags2                 = FLAG2_HAS_PHY_STATS
5878                                  | FLAG2_HAS_EEE
5879                                  | FLAG2_CHECK_SYSTIM_OVERFLOW,
5880        .pba                    = 26,
5881        .max_hw_frame_size      = 9022,
5882        .get_variants           = e1000_get_variants_ich8lan,
5883        .mac_ops                = &ich8_mac_ops,
5884        .phy_ops                = &ich8_phy_ops,
5885        .nvm_ops                = &ich8_nvm_ops,
5886};
5887
5888const struct e1000_info e1000_pch_lpt_info = {
5889        .mac                    = e1000_pch_lpt,
5890        .flags                  = FLAG_IS_ICH
5891                                  | FLAG_HAS_WOL
5892                                  | FLAG_HAS_HW_TIMESTAMP
5893                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5894                                  | FLAG_HAS_AMT
5895                                  | FLAG_HAS_FLASH
5896                                  | FLAG_HAS_JUMBO_FRAMES
5897                                  | FLAG_APME_IN_WUC,
5898        .flags2                 = FLAG2_HAS_PHY_STATS
5899                                  | FLAG2_HAS_EEE
5900                                  | FLAG2_CHECK_SYSTIM_OVERFLOW,
5901        .pba                    = 26,
5902        .max_hw_frame_size      = 9022,
5903        .get_variants           = e1000_get_variants_ich8lan,
5904        .mac_ops                = &ich8_mac_ops,
5905        .phy_ops                = &ich8_phy_ops,
5906        .nvm_ops                = &ich8_nvm_ops,
5907};
5908
5909const struct e1000_info e1000_pch_spt_info = {
5910        .mac                    = e1000_pch_spt,
5911        .flags                  = FLAG_IS_ICH
5912                                  | FLAG_HAS_WOL
5913                                  | FLAG_HAS_HW_TIMESTAMP
5914                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5915                                  | FLAG_HAS_AMT
5916                                  | FLAG_HAS_FLASH
5917                                  | FLAG_HAS_JUMBO_FRAMES
5918                                  | FLAG_APME_IN_WUC,
5919        .flags2                 = FLAG2_HAS_PHY_STATS
5920                                  | FLAG2_HAS_EEE,
5921        .pba                    = 26,
5922        .max_hw_frame_size      = 9022,
5923        .get_variants           = e1000_get_variants_ich8lan,
5924        .mac_ops                = &ich8_mac_ops,
5925        .phy_ops                = &ich8_phy_ops,
5926        .nvm_ops                = &spt_nvm_ops,
5927};
5928
5929const struct e1000_info e1000_pch_cnp_info = {
5930        .mac                    = e1000_pch_cnp,
5931        .flags                  = FLAG_IS_ICH
5932                                  | FLAG_HAS_WOL
5933                                  | FLAG_HAS_HW_TIMESTAMP
5934                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5935                                  | FLAG_HAS_AMT
5936                                  | FLAG_HAS_FLASH
5937                                  | FLAG_HAS_JUMBO_FRAMES
5938                                  | FLAG_APME_IN_WUC,
5939        .flags2                 = FLAG2_HAS_PHY_STATS
5940                                  | FLAG2_HAS_EEE,
5941        .pba                    = 26,
5942        .max_hw_frame_size      = 9022,
5943        .get_variants           = e1000_get_variants_ich8lan,
5944        .mac_ops                = &ich8_mac_ops,
5945        .phy_ops                = &ich8_phy_ops,
5946        .nvm_ops                = &spt_nvm_ops,
5947};
5948
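/* Closing sketch: each e1000_info above is a const template that the core
 * driver is assumed to select by board type at probe time, copy into the live
 * hw structures, and then finish via the get_variants hook.  The table and
 * helper below are hypothetical illustrations, not the actual lookup kept
 * elsewhere in the driver.
 */
static const struct e1000_info * const example_info_tbl[] __maybe_unused = {
        &e1000_ich8_info,
        &e1000_ich9_info,
        &e1000_ich10_info,
        &e1000_pch_info,
        &e1000_pch2_info,
        &e1000_pch_lpt_info,
        &e1000_pch_spt_info,
        &e1000_pch_cnp_info,
};

static s32 __maybe_unused e1000_apply_info_example(struct e1000_adapter *adapter,
                                                   const struct e1000_info *ei)
{
        /* copy the const op templates, then let the family hook adjust them */
        adapter->hw.mac.ops = *ei->mac_ops;
        adapter->hw.phy.ops = *ei->phy_ops;
        adapter->hw.nvm.ops = *ei->nvm_ops;

        return ei->get_variants(adapter);
}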