linux/drivers/net/ethernet/intel/e1000e/ich8lan.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
   3
   4/* 82562G 10/100 Network Connection
   5 * 82562G-2 10/100 Network Connection
   6 * 82562GT 10/100 Network Connection
   7 * 82562GT-2 10/100 Network Connection
   8 * 82562V 10/100 Network Connection
   9 * 82562V-2 10/100 Network Connection
  10 * 82566DC-2 Gigabit Network Connection
  11 * 82566DC Gigabit Network Connection
  12 * 82566DM-2 Gigabit Network Connection
  13 * 82566DM Gigabit Network Connection
  14 * 82566MC Gigabit Network Connection
  15 * 82566MM Gigabit Network Connection
  16 * 82567LM Gigabit Network Connection
  17 * 82567LF Gigabit Network Connection
  18 * 82567V Gigabit Network Connection
  19 * 82567LM-2 Gigabit Network Connection
  20 * 82567LF-2 Gigabit Network Connection
  21 * 82567V-2 Gigabit Network Connection
  22 * 82567LF-3 Gigabit Network Connection
  23 * 82567LM-3 Gigabit Network Connection
  24 * 82567LM-4 Gigabit Network Connection
  25 * 82577LM Gigabit Network Connection
  26 * 82577LC Gigabit Network Connection
  27 * 82578DM Gigabit Network Connection
  28 * 82578DC Gigabit Network Connection
  29 * 82579LM Gigabit Network Connection
  30 * 82579V Gigabit Network Connection
  31 * Ethernet Connection I217-LM
  32 * Ethernet Connection I217-V
  33 * Ethernet Connection I218-V
  34 * Ethernet Connection I218-LM
  35 * Ethernet Connection (2) I218-LM
  36 * Ethernet Connection (2) I218-V
  37 * Ethernet Connection (3) I218-LM
  38 * Ethernet Connection (3) I218-V
  39 */
  40
  41#include "e1000.h"
  42
  43/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
  44/* Offset 04h HSFSTS */
  45union ich8_hws_flash_status {
  46        struct ich8_hsfsts {
  47                u16 flcdone:1;  /* bit 0 Flash Cycle Done */
  48                u16 flcerr:1;   /* bit 1 Flash Cycle Error */
  49                u16 dael:1;     /* bit 2 Direct Access error Log */
  50                u16 berasesz:2; /* bit 4:3 Sector Erase Size */
  51                u16 flcinprog:1;        /* bit 5 flash cycle in Progress */
  52                u16 reserved1:2;        /* bit 7:6 Reserved */
  53                u16 reserved2:6;        /* bit 13:8 Reserved */
  54                u16 fldesvalid:1;       /* bit 14 Flash Descriptor Valid */
  55                u16 flockdn:1;  /* bit 15 Flash Config Lock-Down */
  56        } hsf_status;
  57        u16 regval;
  58};
  59
  60/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
  61/* Offset 06h FLCTL */
  62union ich8_hws_flash_ctrl {
  63        struct ich8_hsflctl {
  64                u16 flcgo:1;    /* 0 Flash Cycle Go */
  65                u16 flcycle:2;  /* 2:1 Flash Cycle */
  66                u16 reserved:5; /* 7:3 Reserved  */
  67                u16 fldbcount:2;        /* 9:8 Flash Data Byte Count */
  68                u16 flockdn:6;  /* 15:10 Reserved */
  69        } hsf_ctrl;
  70        u16 regval;
  71};
  72
  73/* ICH Flash Region Access Permissions */
  74union ich8_hws_flash_regacc {
  75        struct ich8_flracc {
  76                u32 grra:8;     /* 0:7 GbE region Read Access */
  77                u32 grwa:8;     /* 8:15 GbE region Write Access */
  78                u32 gmrag:8;    /* 23:16 GbE Master Read Access Grant */
  79                u32 gmwag:8;    /* 31:24 GbE Master Write Access Grant */
  80        } hsf_flregacc;
  81        u32 regval;
  82};
  83
  84/* ICH Flash Protected Region */
  85union ich8_flash_protected_range {
  86        struct ich8_pr {
  87                u32 base:13;    /* 0:12 Protected Range Base */
  88                u32 reserved1:2;        /* 13:14 Reserved */
  89                u32 rpe:1;      /* 15 Read Protection Enable */
  90                u32 limit:13;   /* 16:28 Protected Range Limit */
  91                u32 reserved2:2;        /* 29:30 Reserved */
  92                u32 wpe:1;      /* 31 Write Protection Enable */
  93        } range;
  94        u32 regval;
  95};
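
/* For illustration: base and limit above are flash-sector indices (assuming
 * 4 KiB sectors, cf. FLASH_SECTOR_ADDR_SHIFT used later in this file), so a
 * range with base = 0x10 and limit = 0x1F would cover byte offsets 0x10000
 * through 0x1FFFF of the GbE flash region.
 */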
  96
  97static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
  98static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
  99static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
 100static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
 101                                                u32 offset, u8 byte);
 102static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
 103                                         u8 *data);
 104static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
 105                                         u16 *data);
 106static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 107                                         u8 size, u16 *data);
 108static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
 109                                           u32 *data);
 110static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
 111                                          u32 offset, u32 *data);
 112static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
 113                                            u32 offset, u32 data);
 114static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
 115                                                 u32 offset, u32 dword);
 116static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
 117static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
 118static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
 119static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
 120static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
 121static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
 122static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
 123static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
 124static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
 125static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
 126static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
 127static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
 128static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
 129static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 130static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
 131static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
 132static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
 133static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
 134static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
 135static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 136static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
 137static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
 138static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
 139static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
 140
 141static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 142{
 143        return readw(hw->flash_address + reg);
 144}
 145
 146static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
 147{
 148        return readl(hw->flash_address + reg);
 149}
 150
 151static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
 152{
 153        writew(val, hw->flash_address + reg);
 154}
 155
 156static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
 157{
 158        writel(val, hw->flash_address + reg);
 159}
 160
 161#define er16flash(reg)          __er16flash(hw, (reg))
 162#define er32flash(reg)          __er32flash(hw, (reg))
 163#define ew16flash(reg, val)     __ew16flash(hw, (reg), (val))
 164#define ew32flash(reg, val)     __ew32flash(hw, (reg), (val))
 165
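/* Typical use of the unions and flash accessors above (illustrative sketch;
 * the flash-cycle helpers later in this file follow this pattern):
 *
 *      union ich8_hws_flash_status hsfsts;
 *
 *      hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 *      if (hsfsts.hsf_status.flcdone)
 *              return 0;
 *
 * The er*flash()/ew*flash() wrappers expand to MMIO accesses relative to
 * hw->flash_address, so a local 'hw' pointer must be in scope at the call
 * site.
 */
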
 166/**
 167 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 168 *  @hw: pointer to the HW structure
 169 *
 170 *  Test access to the PHY registers by reading the PHY ID registers.  If
 171 *  the PHY ID is already known (e.g. the resume path), compare it with the
 172 *  known ID; otherwise assume the newly read PHY ID is correct if it is valid.
 173 *
 174 *  Assumes the sw/fw/hw semaphore is already acquired.
 175 **/
 176static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 177{
 178        u16 phy_reg = 0;
 179        u32 phy_id = 0;
 180        s32 ret_val = 0;
 181        u16 retry_count;
 182        u32 mac_reg = 0;
 183
 184        for (retry_count = 0; retry_count < 2; retry_count++) {
 185                ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
 186                if (ret_val || (phy_reg == 0xFFFF))
 187                        continue;
 188                phy_id = (u32)(phy_reg << 16);
 189
 190                ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
 191                if (ret_val || (phy_reg == 0xFFFF)) {
 192                        phy_id = 0;
 193                        continue;
 194                }
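                /* PHYSID1 provides the upper 16 bits of the PHY ID; PHYSID2
                 * provides the lower bits, with the 4-bit revision masked
                 * off here by PHY_REVISION_MASK (the revision itself is
                 * captured below as hw->phy.revision).
                 */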
 195                phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
 196                break;
 197        }
 198
 199        if (hw->phy.id) {
 200                if (hw->phy.id == phy_id)
 201                        goto out;
 202        } else if (phy_id) {
 203                hw->phy.id = phy_id;
 204                hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
 205                goto out;
 206        }
 207
 208        /* In case the PHY needs to be in mdio slow mode,
 209         * set slow mode and try to get the PHY id again.
 210         */
 211        if (hw->mac.type < e1000_pch_lpt) {
 212                hw->phy.ops.release(hw);
 213                ret_val = e1000_set_mdio_slow_mode_hv(hw);
 214                if (!ret_val)
 215                        ret_val = e1000e_get_phy_id(hw);
 216                hw->phy.ops.acquire(hw);
 217        }
 218
 219        if (ret_val)
 220                return false;
 221out:
 222        if (hw->mac.type >= e1000_pch_lpt) {
 223                /* Only unforce SMBus if ME is not active */
 224                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
 225                        /* Unforce SMBus mode in PHY */
 226                        e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
 227                        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
 228                        e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
 229
 230                        /* Unforce SMBus mode in MAC */
 231                        mac_reg = er32(CTRL_EXT);
 232                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
 233                        ew32(CTRL_EXT, mac_reg);
 234                }
 235        }
 236
 237        return true;
 238}
 239
 240/**
 241 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 242 *  @hw: pointer to the HW structure
 243 *
 244 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 245 *  used to reset the PHY to a quiescent state when necessary.
 246 **/
 247static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
 248{
 249        u32 mac_reg;
 250
 251        /* Set Phy Config Counter to 50msec */
 252        mac_reg = er32(FEXTNVM3);
 253        mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
 254        mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
 255        ew32(FEXTNVM3, mac_reg);
 256
 257        /* Toggle LANPHYPC Value bit */
 258        mac_reg = er32(CTRL);
 259        mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
 260        mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
 261        ew32(CTRL, mac_reg);
 262        e1e_flush();
 263        usleep_range(10, 20);
 264        mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
 265        ew32(CTRL, mac_reg);
 266        e1e_flush();
 267
 268        if (hw->mac.type < e1000_pch_lpt) {
 269                msleep(50);
 270        } else {
 271                u16 count = 20;
 272
 273                do {
 274                        usleep_range(5000, 6000);
 275                } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
 276
 277                msleep(30);
 278        }
 279}
 280
 281/**
 282 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 283 *  @hw: pointer to the HW structure
 284 *
 285 *  Workarounds/flow necessary for PHY initialization during driver load
 286 *  and resume paths.
 287 **/
 288static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 289{
 290        struct e1000_adapter *adapter = hw->adapter;
 291        u32 mac_reg, fwsm = er32(FWSM);
 292        s32 ret_val;
 293
 294        /* Gate automatic PHY configuration by hardware on managed and
 295         * non-managed 82579 and newer adapters.
 296         */
 297        e1000_gate_hw_phy_config_ich8lan(hw, true);
 298
 299        /* It is not possible to be certain of the current state of ULP
 300         * so forcibly disable it.
 301         */
 302        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
 303        ret_val = e1000_disable_ulp_lpt_lp(hw, true);
 304        if (ret_val)
 305                e_warn("Failed to disable ULP\n");
 306
 307        ret_val = hw->phy.ops.acquire(hw);
 308        if (ret_val) {
 309                e_dbg("Failed to initialize PHY flow\n");
 310                goto out;
 311        }
 312
 313        /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
 314         * inaccessible and resetting the PHY is not blocked, toggle the
 315         * LANPHYPC Value bit to force the interconnect to PCIe mode.
 316         */
 317        switch (hw->mac.type) {
 318        case e1000_pch_lpt:
 319        case e1000_pch_spt:
 320        case e1000_pch_cnp:
 321        case e1000_pch_tgp:
 322        case e1000_pch_adp:
 323        case e1000_pch_mtp:
 324                if (e1000_phy_is_accessible_pchlan(hw))
 325                        break;
 326
 327                /* Before toggling LANPHYPC, see if PHY is accessible by
 328                 * forcing MAC to SMBus mode first.
 329                 */
 330                mac_reg = er32(CTRL_EXT);
 331                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
 332                ew32(CTRL_EXT, mac_reg);
 333
 334                /* Wait 50 milliseconds for MAC to finish any retries
 335                 * that it might be trying to perform from previous
 336                 * attempts to acknowledge any phy read requests.
 337                 */
 338                msleep(50);
 339
 340                fallthrough;
 341        case e1000_pch2lan:
 342                if (e1000_phy_is_accessible_pchlan(hw))
 343                        break;
 344
 345                fallthrough;
 346        case e1000_pchlan:
 347                if ((hw->mac.type == e1000_pchlan) &&
 348                    (fwsm & E1000_ICH_FWSM_FW_VALID))
 349                        break;
 350
 351                if (hw->phy.ops.check_reset_block(hw)) {
 352                        e_dbg("Required LANPHYPC toggle blocked by ME\n");
 353                        ret_val = -E1000_ERR_PHY;
 354                        break;
 355                }
 356
 357                /* Toggle LANPHYPC Value bit */
 358                e1000_toggle_lanphypc_pch_lpt(hw);
 359                if (hw->mac.type >= e1000_pch_lpt) {
 360                        if (e1000_phy_is_accessible_pchlan(hw))
 361                                break;
 362
 363                        /* Toggling LANPHYPC brings the PHY out of SMBus mode
 364                         * so ensure that the MAC is also out of SMBus mode
 365                         */
 366                        mac_reg = er32(CTRL_EXT);
 367                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
 368                        ew32(CTRL_EXT, mac_reg);
 369
 370                        if (e1000_phy_is_accessible_pchlan(hw))
 371                                break;
 372
 373                        ret_val = -E1000_ERR_PHY;
 374                }
 375                break;
 376        default:
 377                break;
 378        }
 379
 380        hw->phy.ops.release(hw);
 381        if (!ret_val) {
 382
 383                /* Check to see if able to reset PHY.  Print error if not */
 384                if (hw->phy.ops.check_reset_block(hw)) {
 385                        e_err("Reset blocked by ME\n");
 386                        goto out;
 387                }
 388
 389                /* Reset the PHY before any access to it.  Doing so ensures
 390                 * that the PHY is in a known good state before we read/write
 391                 * PHY registers.  The generic reset is sufficient here,
 392                 * because we haven't determined the PHY type yet.
 393                 */
 394                ret_val = e1000e_phy_hw_reset_generic(hw);
 395                if (ret_val)
 396                        goto out;
 397
 398                /* On a successful reset, possibly need to wait for the PHY
 399                 * to quiesce to an accessible state before returning control
 400                 * to the calling function.  If the PHY does not quiesce, then
 401                 * return E1000E_BLK_PHY_RESET, as this is the condition that
 402                 * the PHY is in.
 403                 */
 404                ret_val = hw->phy.ops.check_reset_block(hw);
 405                if (ret_val)
 406                        e_err("ME blocked access to PHY after reset\n");
 407        }
 408
 409out:
 410        /* Ungate automatic PHY configuration on non-managed 82579 */
 411        if ((hw->mac.type == e1000_pch2lan) &&
 412            !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
 413                usleep_range(10000, 11000);
 414                e1000_gate_hw_phy_config_ich8lan(hw, false);
 415        }
 416
 417        return ret_val;
 418}
 419
 420/**
 421 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 422 *  @hw: pointer to the HW structure
 423 *
 424 *  Initialize family-specific PHY parameters and function pointers.
 425 **/
 426static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 427{
 428        struct e1000_phy_info *phy = &hw->phy;
 429        s32 ret_val;
 430
 431        phy->addr = 1;
 432        phy->reset_delay_us = 100;
 433
 434        phy->ops.set_page = e1000_set_page_igp;
 435        phy->ops.read_reg = e1000_read_phy_reg_hv;
 436        phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
 437        phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
 438        phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
 439        phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
 440        phy->ops.write_reg = e1000_write_phy_reg_hv;
 441        phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
 442        phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
 443        phy->ops.power_up = e1000_power_up_phy_copper;
 444        phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
 445        phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 446
 447        phy->id = e1000_phy_unknown;
 448
 449        ret_val = e1000_init_phy_workarounds_pchlan(hw);
 450        if (ret_val)
 451                return ret_val;
 452
 453        if (phy->id == e1000_phy_unknown)
 454                switch (hw->mac.type) {
 455                default:
 456                        ret_val = e1000e_get_phy_id(hw);
 457                        if (ret_val)
 458                                return ret_val;
 459                        if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
 460                                break;
 461                        fallthrough;
 462                case e1000_pch2lan:
 463                case e1000_pch_lpt:
 464                case e1000_pch_spt:
 465                case e1000_pch_cnp:
 466                case e1000_pch_tgp:
 467                case e1000_pch_adp:
 468                case e1000_pch_mtp:
 469                        /* In case the PHY needs to be in mdio slow mode,
 470                         * set slow mode and try to get the PHY id again.
 471                         */
 472                        ret_val = e1000_set_mdio_slow_mode_hv(hw);
 473                        if (ret_val)
 474                                return ret_val;
 475                        ret_val = e1000e_get_phy_id(hw);
 476                        if (ret_val)
 477                                return ret_val;
 478                        break;
 479                }
 480        phy->type = e1000e_get_phy_type_from_id(phy->id);
 481
 482        switch (phy->type) {
 483        case e1000_phy_82577:
 484        case e1000_phy_82579:
 485        case e1000_phy_i217:
 486                phy->ops.check_polarity = e1000_check_polarity_82577;
 487                phy->ops.force_speed_duplex =
 488                    e1000_phy_force_speed_duplex_82577;
 489                phy->ops.get_cable_length = e1000_get_cable_length_82577;
 490                phy->ops.get_info = e1000_get_phy_info_82577;
 491                phy->ops.commit = e1000e_phy_sw_reset;
 492                break;
 493        case e1000_phy_82578:
 494                phy->ops.check_polarity = e1000_check_polarity_m88;
 495                phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
 496                phy->ops.get_cable_length = e1000e_get_cable_length_m88;
 497                phy->ops.get_info = e1000e_get_phy_info_m88;
 498                break;
 499        default:
 500                ret_val = -E1000_ERR_PHY;
 501                break;
 502        }
 503
 504        return ret_val;
 505}
 506
 507/**
 508 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 509 *  @hw: pointer to the HW structure
 510 *
 511 *  Initialize family-specific PHY parameters and function pointers.
 512 **/
 513static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
 514{
 515        struct e1000_phy_info *phy = &hw->phy;
 516        s32 ret_val;
 517        u16 i = 0;
 518
 519        phy->addr = 1;
 520        phy->reset_delay_us = 100;
 521
 522        phy->ops.power_up = e1000_power_up_phy_copper;
 523        phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
 524
 525        /* We may need to do this twice - once for IGP and if that fails,
 526         * we'll set BM func pointers and try again
 527         */
 528        ret_val = e1000e_determine_phy_address(hw);
 529        if (ret_val) {
 530                phy->ops.write_reg = e1000e_write_phy_reg_bm;
 531                phy->ops.read_reg = e1000e_read_phy_reg_bm;
 532                ret_val = e1000e_determine_phy_address(hw);
 533                if (ret_val) {
 534                        e_dbg("Cannot determine PHY addr. Erroring out\n");
 535                        return ret_val;
 536                }
 537        }
 538
 539        phy->id = 0;
 540        while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
 541               (i++ < 100)) {
 542                usleep_range(1000, 1100);
 543                ret_val = e1000e_get_phy_id(hw);
 544                if (ret_val)
 545                        return ret_val;
 546        }
 547
 548        /* Verify phy id */
 549        switch (phy->id) {
 550        case IGP03E1000_E_PHY_ID:
 551                phy->type = e1000_phy_igp_3;
 552                phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 553                phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
 554                phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
 555                phy->ops.get_info = e1000e_get_phy_info_igp;
 556                phy->ops.check_polarity = e1000_check_polarity_igp;
 557                phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
 558                break;
 559        case IFE_E_PHY_ID:
 560        case IFE_PLUS_E_PHY_ID:
 561        case IFE_C_E_PHY_ID:
 562                phy->type = e1000_phy_ife;
 563                phy->autoneg_mask = E1000_ALL_NOT_GIG;
 564                phy->ops.get_info = e1000_get_phy_info_ife;
 565                phy->ops.check_polarity = e1000_check_polarity_ife;
 566                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
 567                break;
 568        case BME1000_E_PHY_ID:
 569                phy->type = e1000_phy_bm;
 570                phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 571                phy->ops.read_reg = e1000e_read_phy_reg_bm;
 572                phy->ops.write_reg = e1000e_write_phy_reg_bm;
 573                phy->ops.commit = e1000e_phy_sw_reset;
 574                phy->ops.get_info = e1000e_get_phy_info_m88;
 575                phy->ops.check_polarity = e1000_check_polarity_m88;
 576                phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
 577                break;
 578        default:
 579                return -E1000_ERR_PHY;
 580        }
 581
 582        return 0;
 583}
 584
 585/**
 586 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 587 *  @hw: pointer to the HW structure
 588 *
 589 *  Initialize family-specific NVM parameters and function
 590 *  pointers.
 591 **/
 592static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 593{
 594        struct e1000_nvm_info *nvm = &hw->nvm;
 595        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 596        u32 gfpreg, sector_base_addr, sector_end_addr;
 597        u16 i;
 598        u32 nvm_size;
 599
 600        nvm->type = e1000_nvm_flash_sw;
 601
 602        if (hw->mac.type >= e1000_pch_spt) {
 603                /* in SPT, gfpreg doesn't exist. NVM size is taken from the
 604                 * STRAP register. This is because in SPT the GbE Flash region
 605                 * is no longer accessed through the flash registers. Instead,
 606                 * the mechanism has changed, and the Flash region access
 607                 * registers are now implemented in GbE memory space.
 608                 */
 609                nvm->flash_base_addr = 0;
 610                nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
 611                    * NVM_SIZE_MULTIPLIER;
 612                nvm->flash_bank_size = nvm_size / 2;
 613                /* Adjust to word count */
 614                nvm->flash_bank_size /= sizeof(u16);
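                /* Example (assuming NVM_SIZE_MULTIPLIER is 4 KiB): a STRAP
                 * NVMS field of 0x1F yields (0x1F + 1) * 4096 = 128 KiB of
                 * NVM, i.e. two banks of 64 KiB (32K 16-bit words) each.
                 */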
 615                /* Set the base address for flash register access */
 616                hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
 617        } else {
 618                /* Can't read flash registers if register set isn't mapped. */
 619                if (!hw->flash_address) {
 620                        e_dbg("ERROR: Flash registers not mapped\n");
 621                        return -E1000_ERR_CONFIG;
 622                }
 623
 624                gfpreg = er32flash(ICH_FLASH_GFPREG);
 625
 626                /* sector_X_addr is a "sector"-aligned address (4096 bytes)
 627                 * Add 1 to sector_end_addr since this sector is included in
 628                 * the overall size.
 629                 */
 630                sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
 631                sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
 632
 633                /* flash_base_addr is byte-aligned */
 634                nvm->flash_base_addr = sector_base_addr
 635                    << FLASH_SECTOR_ADDR_SHIFT;
 636
 637                /* find total size of the NVM, then cut in half since the total
 638                 * size represents two separate NVM banks.
 639                 */
 640                nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
 641                                        << FLASH_SECTOR_ADDR_SHIFT);
 642                nvm->flash_bank_size /= 2;
 643                /* Adjust to word count */
 644                nvm->flash_bank_size /= sizeof(u16);
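                /* Example (assuming 4 KiB sectors, i.e. FLASH_SECTOR_ADDR_SHIFT
                 * of 12): gfpreg base sector 0x10 and limit sector 0x1F give
                 * flash_base_addr = 0x10000 and a 0x10-sector (64 KiB) region,
                 * i.e. two banks of 32 KiB (16K 16-bit words) each.
                 */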
 645        }
 646
 647        nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
 648
 649        /* Clear shadow ram */
 650        for (i = 0; i < nvm->word_size; i++) {
 651                dev_spec->shadow_ram[i].modified = false;
 652                dev_spec->shadow_ram[i].value = 0xFFFF;
 653        }
 654
 655        return 0;
 656}
 657
 658/**
 659 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 660 *  @hw: pointer to the HW structure
 661 *
 662 *  Initialize family-specific MAC parameters and function
 663 *  pointers.
 664 **/
 665static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
 666{
 667        struct e1000_mac_info *mac = &hw->mac;
 668
 669        /* Set media type function pointer */
 670        hw->phy.media_type = e1000_media_type_copper;
 671
 672        /* Set mta register count */
 673        mac->mta_reg_count = 32;
 674        /* Set rar entry count */
 675        mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
 676        if (mac->type == e1000_ich8lan)
 677                mac->rar_entry_count--;
 678        /* FWSM register */
 679        mac->has_fwsm = true;
 680        /* ARC subsystem not supported */
 681        mac->arc_subsystem_valid = false;
 682        /* Adaptive IFS supported */
 683        mac->adaptive_ifs = true;
 684
 685        /* LED and other operations */
 686        switch (mac->type) {
 687        case e1000_ich8lan:
 688        case e1000_ich9lan:
 689        case e1000_ich10lan:
 690                /* check management mode */
 691                mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
 692                /* ID LED init */
 693                mac->ops.id_led_init = e1000e_id_led_init_generic;
 694                /* blink LED */
 695                mac->ops.blink_led = e1000e_blink_led_generic;
 696                /* setup LED */
 697                mac->ops.setup_led = e1000e_setup_led_generic;
 698                /* cleanup LED */
 699                mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
 700                /* turn on/off LED */
 701                mac->ops.led_on = e1000_led_on_ich8lan;
 702                mac->ops.led_off = e1000_led_off_ich8lan;
 703                break;
 704        case e1000_pch2lan:
 705                mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
 706                mac->ops.rar_set = e1000_rar_set_pch2lan;
 707                fallthrough;
 708        case e1000_pch_lpt:
 709        case e1000_pch_spt:
 710        case e1000_pch_cnp:
 711        case e1000_pch_tgp:
 712        case e1000_pch_adp:
 713        case e1000_pch_mtp:
 714        case e1000_pchlan:
 715                /* check management mode */
 716                mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
 717                /* ID LED init */
 718                mac->ops.id_led_init = e1000_id_led_init_pchlan;
 719                /* setup LED */
 720                mac->ops.setup_led = e1000_setup_led_pchlan;
 721                /* cleanup LED */
 722                mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
 723                /* turn on/off LED */
 724                mac->ops.led_on = e1000_led_on_pchlan;
 725                mac->ops.led_off = e1000_led_off_pchlan;
 726                break;
 727        default:
 728                break;
 729        }
 730
 731        if (mac->type >= e1000_pch_lpt) {
 732                mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
 733                mac->ops.rar_set = e1000_rar_set_pch_lpt;
 734                mac->ops.setup_physical_interface =
 735                    e1000_setup_copper_link_pch_lpt;
 736                mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
 737        }
 738
 739        /* Enable PCS Lock-loss workaround for ICH8 */
 740        if (mac->type == e1000_ich8lan)
 741                e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 742
 743        return 0;
 744}
 745
 746/**
 747 *  __e1000_access_emi_reg_locked - Read/write EMI register
 748 *  @hw: pointer to the HW structure
 749 *  @address: EMI address to program
 750 *  @data: pointer to value to read/write from/to the EMI address
 751 *  @read: boolean flag to indicate read or write
 752 *
 753 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 754 **/
 755static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
 756                                         u16 *data, bool read)
 757{
 758        s32 ret_val;
 759
 760        ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
 761        if (ret_val)
 762                return ret_val;
 763
 764        if (read)
 765                ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
 766        else
 767                ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
 768
 769        return ret_val;
 770}
 771
 772/**
 773 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 774 *  @hw: pointer to the HW structure
 775 *  @addr: EMI address to program
 776 *  @data: value to be read from the EMI address
 777 *
 778 *  Assumes the SW/FW/HW Semaphore is already acquired.
 779 **/
 780s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
 781{
 782        return __e1000_access_emi_reg_locked(hw, addr, data, true);
 783}
 784
 785/**
 786 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 787 *  @hw: pointer to the HW structure
 788 *  @addr: EMI address to program
 789 *  @data: value to be written to the EMI address
 790 *
 791 *  Assumes the SW/FW/HW Semaphore is already acquired.
 792 **/
 793s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
 794{
 795        return __e1000_access_emi_reg_locked(hw, addr, &data, false);
 796}
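
/* Illustrative use of the EMI accessors (e1000_set_eee_pchlan() below uses
 * this pattern): with the SW/FW/HW semaphore held,
 *
 *      ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, &data);
 *      ...
 *      ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, data);
 *
 * Each access is a two-step sequence: the EMI address is written to
 * I82579_EMI_ADDR, then the data is read from or written to I82579_EMI_DATA.
 */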
 797
 798/**
 799 *  e1000_set_eee_pchlan - Enable/disable EEE support
 800 *  @hw: pointer to the HW structure
 801 *
 802 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 803 *  the link and the EEE capabilities of the link partner.  The LPI Control
 804 *  register bits will remain set only if/when link is up.
 805 *
 806 *  EEE LPI must not be asserted earlier than one second after link is up.
 807 *  On 82579, EEE LPI should not be enabled until that time has elapsed;
 808 *  otherwise there can be link issues with some switches.  Other devices
 809 *  can have EEE LPI enabled immediately upon link up since they have a
 810 *  timer in hardware which prevents LPI from being asserted too early.
 811 **/
 812s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 813{
 814        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 815        s32 ret_val;
 816        u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
 817
 818        switch (hw->phy.type) {
 819        case e1000_phy_82579:
 820                lpa = I82579_EEE_LP_ABILITY;
 821                pcs_status = I82579_EEE_PCS_STATUS;
 822                adv_addr = I82579_EEE_ADVERTISEMENT;
 823                break;
 824        case e1000_phy_i217:
 825                lpa = I217_EEE_LP_ABILITY;
 826                pcs_status = I217_EEE_PCS_STATUS;
 827                adv_addr = I217_EEE_ADVERTISEMENT;
 828                break;
 829        default:
 830                return 0;
 831        }
 832
 833        ret_val = hw->phy.ops.acquire(hw);
 834        if (ret_val)
 835                return ret_val;
 836
 837        ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
 838        if (ret_val)
 839                goto release;
 840
 841        /* Clear bits that enable EEE in various speeds */
 842        lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
 843
 844        /* Enable EEE if not disabled by user */
 845        if (!dev_spec->eee_disable) {
 846                /* Save off link partner's EEE ability */
 847                ret_val = e1000_read_emi_reg_locked(hw, lpa,
 848                                                    &dev_spec->eee_lp_ability);
 849                if (ret_val)
 850                        goto release;
 851
 852                /* Read EEE advertisement */
 853                ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
 854                if (ret_val)
 855                        goto release;
 856
 857                /* Enable EEE only for speeds in which the link partner is
 858                 * EEE capable and for which we advertise EEE.
 859                 */
 860                if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
 861                        lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
 862
 863                if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
 864                        e1e_rphy_locked(hw, MII_LPA, &data);
 865                        if (data & LPA_100FULL)
 866                                lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
 867                        else
 868                                /* EEE is not supported in 100Half, so ignore
 869                                 * partner's EEE in 100 ability if full-duplex
 870                                 * is not advertised.
 871                                 */
 872                                dev_spec->eee_lp_ability &=
 873                                    ~I82579_EEE_100_SUPPORTED;
 874                }
 875        }
 876
 877        if (hw->phy.type == e1000_phy_82579) {
 878                ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
 879                                                    &data);
 880                if (ret_val)
 881                        goto release;
 882
 883                data &= ~I82579_LPI_100_PLL_SHUT;
 884                ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
 885                                                     data);
 886        }
 887
 888        /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
 889        ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
 890        if (ret_val)
 891                goto release;
 892
 893        ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
 894release:
 895        hw->phy.ops.release(hw);
 896
 897        return ret_val;
 898}
 899
 900/**
 901 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 902 *  @hw:   pointer to the HW structure
 903 *  @link: link up bool flag
 904 *
 905 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 906 *  preventing further DMA write requests.  Work around the issue by disabling
 907 *  the de-assertion of the clock request when in 1Gbps mode.
 908 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 909 *  speeds in order to avoid Tx hangs.
 910 **/
 911static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
 912{
 913        u32 fextnvm6 = er32(FEXTNVM6);
 914        u32 status = er32(STATUS);
 915        s32 ret_val = 0;
 916        u16 reg;
 917
 918        if (link && (status & E1000_STATUS_SPEED_1000)) {
 919                ret_val = hw->phy.ops.acquire(hw);
 920                if (ret_val)
 921                        return ret_val;
 922
 923                ret_val =
 924                    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
 925                                                &reg);
 926                if (ret_val)
 927                        goto release;
 928
 929                ret_val =
 930                    e1000e_write_kmrn_reg_locked(hw,
 931                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
 932                                                 reg &
 933                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
 934                if (ret_val)
 935                        goto release;
 936
 937                usleep_range(10, 20);
 938
 939                ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
 940
 941                ret_val =
 942                    e1000e_write_kmrn_reg_locked(hw,
 943                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
 944                                                 reg);
 945release:
 946                hw->phy.ops.release(hw);
 947        } else {
 948                /* clear FEXTNVM6 bit 8 on link down or 10/100 */
 949                fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
 950
 951                if ((hw->phy.revision > 5) || !link ||
 952                    ((status & E1000_STATUS_SPEED_100) &&
 953                     (status & E1000_STATUS_FD)))
 954                        goto update_fextnvm6;
 955
 956                ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
 957                if (ret_val)
 958                        return ret_val;
 959
 960                /* Clear link status transmit timeout */
 961                reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
 962
 963                if (status & E1000_STATUS_SPEED_100) {
 964                        /* Set inband Tx timeout to 5x10us for 100Half */
 965                        reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
 966
 967                        /* Do not extend the K1 entry latency for 100Half */
 968                        fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
 969                } else {
 970                        /* Set inband Tx timeout to 50x10us for 10Full/Half */
 971                        reg |= 50 <<
 972                            I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
 973
 974                        /* Extend the K1 entry latency for 10 Mbps */
 975                        fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
 976                }
 977
 978                ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
 979                if (ret_val)
 980                        return ret_val;
 981
 982update_fextnvm6:
 983                ew32(FEXTNVM6, fextnvm6);
 984        }
 985
 986        return ret_val;
 987}
 988
 989/**
 990 *  e1000_platform_pm_pch_lpt - Set platform power management values
 991 *  @hw: pointer to the HW structure
 992 *  @link: bool indicating link status
 993 *
 994 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
 995 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
 996 *  when link is up (which must not exceed the maximum latency supported
 997 *  by the platform); otherwise specify that there is no LTR requirement.
 998 *  Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
 999 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
1000 *  Capability register set, on this device LTR is set by writing the
1001 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1002 *  setting the SEND bit to send an Intel On-chip System Fabric sideband
1003 *  (IOSF-SB) message to the PMC.
1004 **/
1005static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1006{
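        /* LTRV carries the snoop latency in its low bits and the no-snoop
         * latency shifted up by E1000_LTRV_NOSNOOP_SHIFT.  The initializer
         * below sets the "requirement" flag of each field only when link is
         * up, and sets E1000_LTRV_SEND so the MAC pushes the values to the
         * PMC (see the kernel-doc above).
         */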
1007        u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1008            link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1009        u16 lat_enc = 0;        /* latency encoded */
1010
1011        if (link) {
1012                u16 speed, duplex, scale = 0;
1013                u16 max_snoop, max_nosnoop;
1014                u16 max_ltr_enc;        /* max LTR latency encoded */
1015                u64 value;
1016                u32 rxa;
1017
1018                if (!hw->adapter->max_frame_size) {
1019                        e_dbg("max_frame_size not set.\n");
1020                        return -E1000_ERR_CONFIG;
1021                }
1022
1023                hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1024                if (!speed) {
1025                        e_dbg("Speed not set.\n");
1026                        return -E1000_ERR_CONFIG;
1027                }
1028
1029                /* Rx Packet Buffer Allocation size (KB) */
1030                rxa = er32(PBA) & E1000_PBA_RXA_MASK;
1031
1032                /* Determine the maximum latency tolerated by the device.
1033                 *
1034                 * Per the PCIe spec, the tolerated latencies are encoded as
1035                 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1036                 * a 10-bit value (0-1023) to provide a range from 1 ns to
1037                 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
1038                 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1039                 */
1040                rxa *= 512;
1041                value = (rxa > hw->adapter->max_frame_size) ?
1042                        (rxa - hw->adapter->max_frame_size) * (16000 / speed) :
1043                        0;
1044
1045                while (value > PCI_LTR_VALUE_MASK) {
1046                        scale++;
1047                        value = DIV_ROUND_UP(value, BIT(5));
1048                }
1049                if (scale > E1000_LTRV_SCALE_MAX) {
1050                        e_dbg("Invalid LTR latency scale %d\n", scale);
1051                        return -E1000_ERR_CONFIG;
1052                }
1053                lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
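                /* Worked example (illustrative): value = 100000 ns does not
                 * fit in the 10-bit field, so it is scaled down twice:
                 * 100000 -> 3125 (scale 1) -> 98 (scale 2).  lat_enc then
                 * encodes 98 * 2^10 ns, i.e. roughly 100 usec of tolerated
                 * latency.
                 */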
1054
1055                /* Determine the maximum latency tolerated by the platform */
1056                pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
1057                                     &max_snoop);
1058                pci_read_config_word(hw->adapter->pdev,
1059                                     E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1060                max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
1061
1062                if (lat_enc > max_ltr_enc)
1063                        lat_enc = max_ltr_enc;
1064        }
1065
1066        /* Set Snoop and No-Snoop latencies the same */
1067        reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1068        ew32(LTRV, reg);
1069
1070        return 0;
1071}
1072
1073/**
1074 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1075 *  @hw: pointer to the HW structure
1076 *  @to_sx: boolean indicating a system power state transition to Sx
1077 *
1078 *  When link is down, configure ULP mode to significantly reduce the power
1079 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1080 *  ME firmware to start the ULP configuration.  If not on an ME enabled
1081 *  system, configure the ULP mode by software.
1082 */
1083s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1084{
1085        u32 mac_reg;
1086        s32 ret_val = 0;
1087        u16 phy_reg;
1088        u16 oem_reg = 0;
1089
1090        if ((hw->mac.type < e1000_pch_lpt) ||
1091            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1092            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1093            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1094            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1095            (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1096                return 0;
1097
1098        if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1099                /* Request ME configure ULP mode in the PHY */
1100                mac_reg = er32(H2ME);
1101                mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1102                ew32(H2ME, mac_reg);
1103
1104                goto out;
1105        }
1106
1107        if (!to_sx) {
1108                int i = 0;
1109
1110                /* Poll up to 5 seconds for Cable Disconnected indication */
1111                while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1112                        /* Bail if link is re-acquired */
1113                        if (er32(STATUS) & E1000_STATUS_LU)
1114                                return -E1000_ERR_PHY;
1115
1116                        if (i++ == 100)
1117                                break;
1118
1119                        msleep(50);
1120                }
1121                e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
1122                      (er32(FEXT) &
1123                       E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
1124        }
1125
1126        ret_val = hw->phy.ops.acquire(hw);
1127        if (ret_val)
1128                goto out;
1129
1130        /* Force SMBus mode in PHY */
1131        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1132        if (ret_val)
1133                goto release;
1134        phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1135        e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1136
1137        /* Force SMBus mode in MAC */
1138        mac_reg = er32(CTRL_EXT);
1139        mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1140        ew32(CTRL_EXT, mac_reg);
1141
1142        /* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
1143         * LPLU and disable Gig speed when entering ULP
1144         */
1145        if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1146                ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1147                                                       &oem_reg);
1148                if (ret_val)
1149                        goto release;
1150
1151                phy_reg = oem_reg;
1152                phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1153
1154                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1155                                                        phy_reg);
1156
1157                if (ret_val)
1158                        goto release;
1159        }
1160
1161        /* Set Inband ULP Exit, Reset to SMBus mode and
1162         * Disable SMBus Release on PERST# in PHY
1163         */
1164        ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1165        if (ret_val)
1166                goto release;
1167        phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1168                    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1169        if (to_sx) {
1170                if (er32(WUFC) & E1000_WUFC_LNKC)
1171                        phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1172                else
1173                        phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1174
1175                phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1176                phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1177        } else {
1178                phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1179                phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1180                phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1181        }
1182        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1183
1184        /* Set Disable SMBus Release on PERST# in MAC */
1185        mac_reg = er32(FEXTNVM7);
1186        mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1187        ew32(FEXTNVM7, mac_reg);
1188
1189        /* Commit ULP changes in PHY by starting auto ULP configuration */
1190        phy_reg |= I218_ULP_CONFIG1_START;
1191        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1192
1193        if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1194            to_sx && (er32(STATUS) & E1000_STATUS_LU)) {
1195                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1196                                                        oem_reg);
1197                if (ret_val)
1198                        goto release;
1199        }
1200
1201release:
1202        hw->phy.ops.release(hw);
1203out:
1204        if (ret_val)
1205                e_dbg("Error in ULP enable flow: %d\n", ret_val);
1206        else
1207                hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1208
1209        return ret_val;
1210}
1211
1212/**
1213 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1214 *  @hw: pointer to the HW structure
1215 *  @force: boolean indicating whether or not to force disabling ULP
1216 *
1217 *  Un-configure ULP mode when link is up, the system is transitioned from
1218 *  Sx, or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1219 *  system, poll for an indication from ME that ULP has been un-configured.
1220 *  If not on an ME enabled system, un-configure the ULP mode by software.
1221 *
1222 *  During nominal operation, this function is called when link is acquired
1223 *  to disable ULP mode (force=false); otherwise, for example when unloading
1224 *  the driver or during Sx->S0 transitions, this is called with force=true
1225 *  to forcibly disable ULP.
1226 */
1227static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1228{
1229        s32 ret_val = 0;
1230        u32 mac_reg;
1231        u16 phy_reg;
1232        int i = 0;
1233
1234        if ((hw->mac.type < e1000_pch_lpt) ||
1235            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1236            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1237            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1238            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1239            (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1240                return 0;
1241
1242        if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1243                struct e1000_adapter *adapter = hw->adapter;
1244                bool firmware_bug = false;
1245
1246                if (force) {
1247                        /* Request ME un-configure ULP mode in the PHY */
1248                        mac_reg = er32(H2ME);
1249                        mac_reg &= ~E1000_H2ME_ULP;
1250                        mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1251                        ew32(H2ME, mac_reg);
1252                }
1253
1254                /* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE.
1255                 * If this takes more than 1 second, show a warning indicating a
1256                 * firmware bug
1257                 */
1258                while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
1259                        if (i++ == 250) {
1260                                ret_val = -E1000_ERR_PHY;
1261                                goto out;
1262                        }
1263                        if (i > 100 && !firmware_bug)
1264                                firmware_bug = true;
1265
1266                        usleep_range(10000, 11000);
1267                }
1268                if (firmware_bug)
1269                        e_warn("ULP_CONFIG_DONE took %dmsec.  This is a firmware bug\n", i * 10);
1270                else
1271                        e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1272
1273                if (force) {
1274                        mac_reg = er32(H2ME);
1275                        mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1276                        ew32(H2ME, mac_reg);
1277                } else {
1278                        /* Clear H2ME.ULP after ME ULP configuration */
1279                        mac_reg = er32(H2ME);
1280                        mac_reg &= ~E1000_H2ME_ULP;
1281                        ew32(H2ME, mac_reg);
1282                }
1283
1284                goto out;
1285        }
1286
1287        ret_val = hw->phy.ops.acquire(hw);
1288        if (ret_val)
1289                goto out;
1290
1291        if (force)
1292                /* Toggle LANPHYPC Value bit */
1293                e1000_toggle_lanphypc_pch_lpt(hw);
1294
1295        /* Unforce SMBus mode in PHY */
1296        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1297        if (ret_val) {
1298                /* The MAC might be in PCIe mode, so temporarily force to
1299                 * SMBus mode in order to access the PHY.
1300                 */
1301                mac_reg = er32(CTRL_EXT);
1302                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1303                ew32(CTRL_EXT, mac_reg);
1304
1305                msleep(50);
1306
1307                ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1308                                                       &phy_reg);
1309                if (ret_val)
1310                        goto release;
1311        }
1312        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1313        e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1314
1315        /* Unforce SMBus mode in MAC */
1316        mac_reg = er32(CTRL_EXT);
1317        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1318        ew32(CTRL_EXT, mac_reg);
1319
1320        /* When ULP mode was previously entered, K1 was disabled by the
1321         * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1322         */
1323        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1324        if (ret_val)
1325                goto release;
1326        phy_reg |= HV_PM_CTRL_K1_ENABLE;
1327        e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1328
1329        /* Clear ULP enabled configuration */
1330        ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1331        if (ret_val)
1332                goto release;
1333        phy_reg &= ~(I218_ULP_CONFIG1_IND |
1334                     I218_ULP_CONFIG1_STICKY_ULP |
1335                     I218_ULP_CONFIG1_RESET_TO_SMBUS |
1336                     I218_ULP_CONFIG1_WOL_HOST |
1337                     I218_ULP_CONFIG1_INBAND_EXIT |
1338                     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1339                     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1340                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1341        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1342
1343        /* Commit ULP changes by starting auto ULP configuration */
1344        phy_reg |= I218_ULP_CONFIG1_START;
1345        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1346
1347        /* Clear Disable SMBus Release on PERST# in MAC */
1348        mac_reg = er32(FEXTNVM7);
1349        mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1350        ew32(FEXTNVM7, mac_reg);
1351
1352release:
1353        hw->phy.ops.release(hw);
1354        if (force) {
1355                e1000_phy_hw_reset(hw);
1356                msleep(50);
1357        }
1358out:
1359        if (ret_val)
1360                e_dbg("Error in ULP disable flow: %d\n", ret_val);
1361        else
1362                hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1363
1364        return ret_val;
1365}
1366
1367/**
1368 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1369 *  @hw: pointer to the HW structure
1370 *
1371 *  Checks to see if the link status of the hardware has changed.  If a
1372 *  change in link status has been detected, then we read the PHY registers
1373 *  to get the current speed/duplex if link exists.
1374 **/
1375static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1376{
1377        struct e1000_mac_info *mac = &hw->mac;
1378        s32 ret_val, tipg_reg = 0;
1379        u16 emi_addr, emi_val = 0;
1380        bool link;
1381        u16 phy_reg;
1382
1383        /* We only want to go out to the PHY registers to see if Auto-Neg
1384         * has completed and/or if our link status has changed.  The
1385         * get_link_status flag is set upon receiving a Link Status
1386         * Change or Rx Sequence Error interrupt.
1387         */
1388        if (!mac->get_link_status)
1389                return 0;
1390        mac->get_link_status = false;
1391
1392        /* First we want to see if the MII Status Register reports
1393         * link.  If so, then we want to get the current speed/duplex
1394         * of the PHY.
1395         */
1396        ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
1397        if (ret_val)
1398                goto out;
1399
1400        if (hw->mac.type == e1000_pchlan) {
1401                ret_val = e1000_k1_gig_workaround_hv(hw, link);
1402                if (ret_val)
1403                        goto out;
1404        }
1405
1406        /* When connected at 10Mbps half-duplex, some parts are excessively
1407         * aggressive resulting in many collisions. To avoid this, increase
1408         * the IPG and reduce Rx latency in the PHY.
1409         */
1410        if ((hw->mac.type >= e1000_pch2lan) && link) {
1411                u16 speed, duplex;
1412
1413                e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex);
1414                tipg_reg = er32(TIPG);
1415                tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1416
1417                if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1418                        tipg_reg |= 0xFF;
1419                        /* Reduce Rx latency in analog PHY */
1420                        emi_val = 0;
1421                } else if (hw->mac.type >= e1000_pch_spt &&
1422                           duplex == FULL_DUPLEX && speed != SPEED_1000) {
1423                        tipg_reg |= 0xC;
1424                        emi_val = 1;
1425                } else {
1426
1427                        /* Roll back the default values */
1428                        tipg_reg |= 0x08;
1429                        emi_val = 1;
1430                }
1431
1432                ew32(TIPG, tipg_reg);
1433
1434                ret_val = hw->phy.ops.acquire(hw);
1435                if (ret_val)
1436                        goto out;
1437
1438                if (hw->mac.type == e1000_pch2lan)
1439                        emi_addr = I82579_RX_CONFIG;
1440                else
1441                        emi_addr = I217_RX_CONFIG;
1442                ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1443
1444                if (hw->mac.type >= e1000_pch_lpt) {
1445                        u16 phy_reg;
1446
1447                        e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg);
1448                        phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1449                        if (speed == SPEED_100 || speed == SPEED_10)
1450                                phy_reg |= 0x3E8;
1451                        else
1452                                phy_reg |= 0xFA;
1453                        e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
1454
1455                        if (speed == SPEED_1000) {
1456                                hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1457                                                            &phy_reg);
1458
1459                                phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1460
1461                                hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1462                                                             phy_reg);
1463                        }
1464                }
1465                hw->phy.ops.release(hw);
1466
1467                if (ret_val)
1468                        goto out;
1469
1470                if (hw->mac.type >= e1000_pch_spt) {
1471                        u16 data;
1472                        u16 ptr_gap;
1473
1474                        if (speed == SPEED_1000) {
1475                                ret_val = hw->phy.ops.acquire(hw);
1476                                if (ret_val)
1477                                        goto out;
1478
1479                                ret_val = e1e_rphy_locked(hw,
1480                                                          PHY_REG(776, 20),
1481                                                          &data);
1482                                if (ret_val) {
1483                                        hw->phy.ops.release(hw);
1484                                        goto out;
1485                                }
1486
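                                    /* Bits 11:2 of PHY_REG(776, 20) hold the
                                     * Tx pointer gap (see the E1000_TX_PTR_GAP
                                     * usage further down); enforce a minimum
                                     * gap of 0x18 while linked at 1000 Mbps.
                                     */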
1487                                ptr_gap = (data & (0x3FF << 2)) >> 2;
1488                                if (ptr_gap < 0x18) {
1489                                        data &= ~(0x3FF << 2);
1490                                        data |= (0x18 << 2);
1491                                        ret_val =
1492                                            e1e_wphy_locked(hw,
1493                                                            PHY_REG(776, 20),
1494                                                            data);
1495                                }
1496                                hw->phy.ops.release(hw);
1497                                if (ret_val)
1498                                        goto out;
1499                        } else {
1500                                ret_val = hw->phy.ops.acquire(hw);
1501                                if (ret_val)
1502                                        goto out;
1503
1504                                ret_val = e1e_wphy_locked(hw,
1505                                                          PHY_REG(776, 20),
1506                                                          0xC023);
1507                                hw->phy.ops.release(hw);
1508                                if (ret_val)
1509                                        goto out;
1510
1511                        }
1512                }
1513        }
1514
1515        /* I217 Packet Loss issue:
1516         * ensure that FEXTNVM4 Beacon Duration is set correctly
1517         * on power up.
1518         * Set the Beacon Duration for I217 to 8 usec
1519         */
1520        if (hw->mac.type >= e1000_pch_lpt) {
1521                u32 mac_reg;
1522
1523                mac_reg = er32(FEXTNVM4);
1524                mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1525                mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1526                ew32(FEXTNVM4, mac_reg);
1527        }
1528
1529        /* Work-around I218 hang issue */
1530        if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1531            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1532            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
1533            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
1534                ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1535                if (ret_val)
1536                        goto out;
1537        }
1538        if (hw->mac.type >= e1000_pch_lpt) {
1539                /* Set platform power management values for
1540                 * Latency Tolerance Reporting (LTR)
1541                 */
1542                ret_val = e1000_platform_pm_pch_lpt(hw, link);
1543                if (ret_val)
1544                        goto out;
1545        }
1546
1547        /* Clear link partner's EEE ability */
1548        hw->dev_spec.ich8lan.eee_lp_ability = 0;
1549
1550        if (hw->mac.type >= e1000_pch_lpt) {
1551                u32 fextnvm6 = er32(FEXTNVM6);
1552
1553                if (hw->mac.type == e1000_pch_spt) {
1554                        /* FEXTNVM6 K1-off workaround - for SPT only */
1555                        u32 pcieanacfg = er32(PCIEANACFG);
1556
1557                        if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1558                                fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1559                        else
1560                                fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1561                }
1562
1563                ew32(FEXTNVM6, fextnvm6);
1564        }
1565
1566        if (!link)
1567                goto out;
1568
1569        switch (hw->mac.type) {
1570        case e1000_pch2lan:
1571                ret_val = e1000_k1_workaround_lv(hw);
1572                if (ret_val)
1573                        return ret_val;
1574                fallthrough;
1575        case e1000_pchlan:
1576                if (hw->phy.type == e1000_phy_82578) {
1577                        ret_val = e1000_link_stall_workaround_hv(hw);
1578                        if (ret_val)
1579                                return ret_val;
1580                }
1581
1582                /* Workaround for PCHx parts in half-duplex:
1583                 * Set the number of preambles removed from the packet
1584                 * when it is passed from the PHY to the MAC to prevent
1585                 * the MAC from misinterpreting the packet type.
1586                 */
1587                e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1588                phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1589
1590                if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
1591                        phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1592
1593                e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1594                break;
1595        default:
1596                break;
1597        }
1598
1599        /* Check if there was DownShift; this must be checked
1600         * immediately after link-up
1601         */
1602        e1000e_check_downshift(hw);
1603
1604        /* Enable/Disable EEE after link up */
1605        if (hw->phy.type > e1000_phy_82579) {
1606                ret_val = e1000_set_eee_pchlan(hw);
1607                if (ret_val)
1608                        return ret_val;
1609        }
1610
1611        /* If we are forcing speed/duplex, then we simply return since
1612         * we have already determined whether we have link or not.
1613         */
1614        if (!mac->autoneg)
1615                return -E1000_ERR_CONFIG;
1616
1617        /* Auto-Neg is enabled.  Auto Speed Detection takes care
1618         * of MAC speed/duplex configuration.  So we only need to
1619         * configure Collision Distance in the MAC.
1620         */
1621        mac->ops.config_collision_dist(hw);
1622
1623        /* Configure Flow Control now that Auto-Neg has completed.
1624         * First, we need to restore the desired flow control
1625         * settings because we may have had to re-autoneg with a
1626         * different link partner.
1627         */
1628        ret_val = e1000e_config_fc_after_link_up(hw);
1629        if (ret_val)
1630                e_dbg("Error configuring flow control\n");
1631
1632        return ret_val;
1633
1634out:
1635        mac->get_link_status = true;
1636        return ret_val;
1637}
1638
1639static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
1640{
1641        struct e1000_hw *hw = &adapter->hw;
1642        s32 rc;
1643
1644        rc = e1000_init_mac_params_ich8lan(hw);
1645        if (rc)
1646                return rc;
1647
1648        rc = e1000_init_nvm_params_ich8lan(hw);
1649        if (rc)
1650                return rc;
1651
1652        switch (hw->mac.type) {
1653        case e1000_ich8lan:
1654        case e1000_ich9lan:
1655        case e1000_ich10lan:
1656                rc = e1000_init_phy_params_ich8lan(hw);
1657                break;
1658        case e1000_pchlan:
1659        case e1000_pch2lan:
1660        case e1000_pch_lpt:
1661        case e1000_pch_spt:
1662        case e1000_pch_cnp:
1663        case e1000_pch_tgp:
1664        case e1000_pch_adp:
1665        case e1000_pch_mtp:
1666                rc = e1000_init_phy_params_pchlan(hw);
1667                break;
1668        default:
1669                break;
1670        }
1671        if (rc)
1672                return rc;
1673
1674        /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
1675         * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
1676         */
1677        if ((adapter->hw.phy.type == e1000_phy_ife) ||
1678            ((adapter->hw.mac.type >= e1000_pch2lan) &&
1679             (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
1680                adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
1681                adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
1682
1683                hw->mac.ops.blink_led = NULL;
1684        }
1685
1686        if ((adapter->hw.mac.type == e1000_ich8lan) &&
1687            (adapter->hw.phy.type != e1000_phy_ife))
1688                adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
1689
1690        /* Enable workaround for 82579 w/ ME enabled */
1691        if ((adapter->hw.mac.type == e1000_pch2lan) &&
1692            (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1693                adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
1694
1695        return 0;
1696}
1697
1698static DEFINE_MUTEX(nvm_mutex);
1699
1700/**
1701 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1702 *  @hw: pointer to the HW structure
1703 *
1704 *  Acquires the mutex for performing NVM operations.
1705 **/
1706static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1707{
1708        mutex_lock(&nvm_mutex);
1709
1710        return 0;
1711}
1712
1713/**
1714 *  e1000_release_nvm_ich8lan - Release NVM mutex
1715 *  @hw: pointer to the HW structure
1716 *
1717 *  Releases the mutex used while performing NVM operations.
1718 **/
1719static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1720{
1721        mutex_unlock(&nvm_mutex);
1722}
1723
1724/**
1725 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1726 *  @hw: pointer to the HW structure
1727 *
1728 *  Acquires the software control flag for performing PHY and select
1729 *  MAC CSR accesses.
1730 **/
1731static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1732{
1733        u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1734        s32 ret_val = 0;
1735
1736        if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
1737                             &hw->adapter->state)) {
1738                e_dbg("contention for Phy access\n");
1739                return -E1000_ERR_PHY;
1740        }
1741
1742        while (timeout) {
1743                extcnf_ctrl = er32(EXTCNF_CTRL);
1744                if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1745                        break;
1746
1747                mdelay(1);
1748                timeout--;
1749        }
1750
1751        if (!timeout) {
1752                e_dbg("SW has already locked the resource.\n");
1753                ret_val = -E1000_ERR_CONFIG;
1754                goto out;
1755        }
1756
1757        timeout = SW_FLAG_TIMEOUT;
1758
1759        extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1760        ew32(EXTCNF_CTRL, extcnf_ctrl);
1761
1762        while (timeout) {
1763                extcnf_ctrl = er32(EXTCNF_CTRL);
1764                if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1765                        break;
1766
1767                mdelay(1);
1768                timeout--;
1769        }
1770
1771        if (!timeout) {
1772                e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1773                      er32(FWSM), extcnf_ctrl);
1774                extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1775                ew32(EXTCNF_CTRL, extcnf_ctrl);
1776                ret_val = -E1000_ERR_CONFIG;
1777                goto out;
1778        }
1779
1780out:
1781        if (ret_val)
1782                clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1783
1784        return ret_val;
1785}
1786
1787/**
1788 *  e1000_release_swflag_ich8lan - Release software control flag
1789 *  @hw: pointer to the HW structure
1790 *
1791 *  Releases the software control flag for performing PHY and select
1792 *  MAC CSR accesses.
1793 **/
1794static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1795{
1796        u32 extcnf_ctrl;
1797
1798        extcnf_ctrl = er32(EXTCNF_CTRL);
1799
1800        if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1801                extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1802                ew32(EXTCNF_CTRL, extcnf_ctrl);
1803        } else {
1804                e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
1805        }
1806
1807        clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1808}
1809
1810/**
1811 *  e1000_check_mng_mode_ich8lan - Checks management mode
1812 *  @hw: pointer to the HW structure
1813 *
1814 *  This checks if the adapter has any manageability enabled.
1815 *  This is a function pointer entry point only called by read/write
1816 *  routines for the PHY and NVM parts.
1817 **/
1818static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1819{
1820        u32 fwsm;
1821
1822        fwsm = er32(FWSM);
1823        return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1824                ((fwsm & E1000_FWSM_MODE_MASK) ==
1825                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1826}
1827
1828/**
1829 *  e1000_check_mng_mode_pchlan - Checks management mode
1830 *  @hw: pointer to the HW structure
1831 *
1832 *  This checks if the adapter has iAMT enabled.
1833 *  This is a function pointer entry point only called by read/write
1834 *  routines for the PHY and NVM parts.
1835 **/
1836static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1837{
1838        u32 fwsm;
1839
1840        fwsm = er32(FWSM);
1841        return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1842            (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1843}
1844
1845/**
1846 *  e1000_rar_set_pch2lan - Set receive address register
1847 *  @hw: pointer to the HW structure
1848 *  @addr: pointer to the receive address
1849 *  @index: receive address array register
1850 *
1851 *  Sets the receive address array register at index to the address passed
1852 *  in by addr.  For 82579, RAR[0] is the base address register that is to
1853 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1854 *  Use SHRA[0-3] in place of those reserved for ME.
1855 **/
1856static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1857{
1858        u32 rar_low, rar_high;
1859
1860        /* HW expects these in little endian so we reverse the byte order
1861         * from network order (big endian) to little endian
1862         */
1863        rar_low = ((u32)addr[0] |
1864                   ((u32)addr[1] << 8) |
1865                   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1866
1867        rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
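            /* Example: addr 00:11:22:33:44:55 gives rar_low 0x33221100 and
             * rar_high 0x00005544 (the AV bit is OR'ed in below when the
             * address is non-zero).
             */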
1868
1869        /* If MAC address zero, no need to set the AV bit */
1870        if (rar_low || rar_high)
1871                rar_high |= E1000_RAH_AV;
1872
1873        if (index == 0) {
1874                ew32(RAL(index), rar_low);
1875                e1e_flush();
1876                ew32(RAH(index), rar_high);
1877                e1e_flush();
1878                return 0;
1879        }
1880
1881        /* RAR[1-6] are owned by manageability.  Skip those and program the
1882         * next address into the SHRA register array.
1883         */
1884        if (index < (u32)(hw->mac.rar_entry_count)) {
1885                s32 ret_val;
1886
1887                ret_val = e1000_acquire_swflag_ich8lan(hw);
1888                if (ret_val)
1889                        goto out;
1890
1891                ew32(SHRAL(index - 1), rar_low);
1892                e1e_flush();
1893                ew32(SHRAH(index - 1), rar_high);
1894                e1e_flush();
1895
1896                e1000_release_swflag_ich8lan(hw);
1897
1898                /* verify the register updates */
1899                if ((er32(SHRAL(index - 1)) == rar_low) &&
1900                    (er32(SHRAH(index - 1)) == rar_high))
1901                        return 0;
1902
1903                e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1904                      (index - 1), er32(FWSM));
1905        }
1906
1907out:
1908        e_dbg("Failed to write receive address at index %d\n", index);
1909        return -E1000_ERR_CONFIG;
1910}
1911
1912/**
1913 *  e1000_rar_get_count_pch_lpt - Get the number of available SHRA
1914 *  @hw: pointer to the HW structure
1915 *
1916 *  Get the number of available receive registers that the Host can
1917 *  program. SHRA[0-10] are the shared receive address registers
1918 *  that are shared between the Host and manageability engine (ME).
1919 *  ME can reserve any number of addresses and the host needs to be
1920 *  able to tell how many available registers it has access to.
1921 **/
1922static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
1923{
1924        u32 wlock_mac;
1925        u32 num_entries;
1926
1927        wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1928        wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1929
1930        switch (wlock_mac) {
1931        case 0:
1932                /* All SHRA[0..10] and RAR[0] available */
1933                num_entries = hw->mac.rar_entry_count;
1934                break;
1935        case 1:
1936                /* Only RAR[0] available */
1937                num_entries = 1;
1938                break;
1939        default:
1940                /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
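                    /* e.g. wlock_mac == 3 -> RAR[0] plus SHRA[0..2] = 4 entries */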
1941                num_entries = wlock_mac + 1;
1942                break;
1943        }
1944
1945        return num_entries;
1946}
1947
1948/**
1949 *  e1000_rar_set_pch_lpt - Set receive address registers
1950 *  @hw: pointer to the HW structure
1951 *  @addr: pointer to the receive address
1952 *  @index: receive address array register
1953 *
1954 *  Sets the receive address register array at index to the address passed
1955 *  in by addr. For LPT, RAR[0] is the base address register that is to
1956 *  contain the MAC address. SHRA[0-10] are the shared receive address
1957 *  registers that are shared between the Host and manageability engine (ME).
1958 **/
1959static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1960{
1961        u32 rar_low, rar_high;
1962        u32 wlock_mac;
1963
1964        /* HW expects these in little endian so we reverse the byte order
1965         * from network order (big endian) to little endian
1966         */
1967        rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
1968                   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1969
1970        rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1971
1972        /* If MAC address zero, no need to set the AV bit */
1973        if (rar_low || rar_high)
1974                rar_high |= E1000_RAH_AV;
1975
1976        if (index == 0) {
1977                ew32(RAL(index), rar_low);
1978                e1e_flush();
1979                ew32(RAH(index), rar_high);
1980                e1e_flush();
1981                return 0;
1982        }
1983
1984        /* The manageability engine (ME) can lock certain SHRAR registers that
1985         * it is using - those registers are unavailable for use.
1986         */
1987        if (index < hw->mac.rar_entry_count) {
1988                wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1989                wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1990
1991                /* Check if all SHRAR registers are locked */
1992                if (wlock_mac == 1)
1993                        goto out;
1994
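                    /* wlock_mac == 0 means no SHRA registers are locked by ME;
                     * otherwise only SHRA[0..wlock_mac - 1] may be written here.
                     */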
1995                if ((wlock_mac == 0) || (index <= wlock_mac)) {
1996                        s32 ret_val;
1997
1998                        ret_val = e1000_acquire_swflag_ich8lan(hw);
1999
2000                        if (ret_val)
2001                                goto out;
2002
2003                        ew32(SHRAL_PCH_LPT(index - 1), rar_low);
2004                        e1e_flush();
2005                        ew32(SHRAH_PCH_LPT(index - 1), rar_high);
2006                        e1e_flush();
2007
2008                        e1000_release_swflag_ich8lan(hw);
2009
2010                        /* verify the register updates */
2011                        if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2012                            (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
2013                                return 0;
2014                }
2015        }
2016
2017out:
2018        e_dbg("Failed to write receive address at index %d\n", index);
2019        return -E1000_ERR_CONFIG;
2020}
2021
2022/**
2023 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2024 *  @hw: pointer to the HW structure
2025 *
2026 *  Checks if firmware is blocking the reset of the PHY.
2027 *  This is a function pointer entry point only called by
2028 *  reset routines.
2029 **/
2030static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2031{
2032        bool blocked = false;
2033        int i = 0;
2034
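            /* Poll FWSM.RSPCIPHY for up to ~300 ms (30 x ~10 ms) before
             * reporting the PHY reset as blocked.
             */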
2035        while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
2036               (i++ < 30))
2037                usleep_range(10000, 11000);
2038        return blocked ? E1000_BLK_PHY_RESET : 0;
2039}
2040
2041/**
2042 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2043 *  @hw: pointer to the HW structure
2044 *
2045 *  Assumes semaphore already acquired.
2046 *
2047 **/
2048static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2049{
2050        u16 phy_data;
2051        u32 strap = er32(STRAP);
2052        u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2053            E1000_STRAP_SMT_FREQ_SHIFT;
2054        s32 ret_val;
2055
2056        strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2057
2058        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2059        if (ret_val)
2060                return ret_val;
2061
2062        phy_data &= ~HV_SMB_ADDR_MASK;
2063        phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2064        phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2065
2066        if (hw->phy.type == e1000_phy_i217) {
2067                /* Restore SMBus frequency */
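                    /* A strap frequency field of 0 is treated as unsupported
                     * (see below); non-zero values are decremented and their
                     * two low bits select the frequency in HV_SMB_ADDR.
                     */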
2068                if (freq--) {
2069                        phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2070                        phy_data |= (freq & BIT(0)) <<
2071                            HV_SMB_ADDR_FREQ_LOW_SHIFT;
2072                        phy_data |= (freq & BIT(1)) <<
2073                            (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2074                } else {
2075                        e_dbg("Unsupported SMB frequency in PHY\n");
2076                }
2077        }
2078
2079        return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2080}
2081
2082/**
2083 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2084 *  @hw:   pointer to the HW structure
2085 *
2086 *  SW should configure the LCD from the NVM extended configuration region
2087 *  as a workaround for certain parts.
2088 **/
2089static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2090{
2091        struct e1000_phy_info *phy = &hw->phy;
2092        u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2093        s32 ret_val = 0;
2094        u16 word_addr, reg_data, reg_addr, phy_page = 0;
2095
2096        /* Initialize the PHY from the NVM on ICH platforms.  This
2097         * is needed due to an issue where the NVM configuration is
2098         * not properly autoloaded after power transitions.
2099         * Therefore, after each PHY reset, we will load the
2100         * configuration data out of the NVM manually.
2101         */
2102        switch (hw->mac.type) {
2103        case e1000_ich8lan:
2104                if (phy->type != e1000_phy_igp_3)
2105                        return ret_val;
2106
2107                if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
2108                    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
2109                        sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2110                        break;
2111                }
2112                fallthrough;
2113        case e1000_pchlan:
2114        case e1000_pch2lan:
2115        case e1000_pch_lpt:
2116        case e1000_pch_spt:
2117        case e1000_pch_cnp:
2118        case e1000_pch_tgp:
2119        case e1000_pch_adp:
2120        case e1000_pch_mtp:
2121                sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2122                break;
2123        default:
2124                return ret_val;
2125        }
2126
2127        ret_val = hw->phy.ops.acquire(hw);
2128        if (ret_val)
2129                return ret_val;
2130
2131        data = er32(FEXTNVM);
2132        if (!(data & sw_cfg_mask))
2133                goto release;
2134
2135        /* Make sure HW does not configure LCD from PHY
2136         * extended configuration before SW configuration
2137         */
2138        data = er32(EXTCNF_CTRL);
2139        if ((hw->mac.type < e1000_pch2lan) &&
2140            (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2141                goto release;
2142
2143        cnf_size = er32(EXTCNF_SIZE);
2144        cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2145        cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2146        if (!cnf_size)
2147                goto release;
2148
2149        cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2150        cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2151
2152        if (((hw->mac.type == e1000_pchlan) &&
2153             !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2154            (hw->mac.type > e1000_pchlan)) {
2155                /* HW configures the SMBus address and LEDs when the
2156                 * OEM and LCD Write Enable bits are set in the NVM.
2157                 * When both NVM bits are cleared, SW will configure
2158                 * them instead.
2159                 */
2160                ret_val = e1000_write_smbus_addr(hw);
2161                if (ret_val)
2162                        goto release;
2163
2164                data = er32(LEDCTL);
2165                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2166                                                        (u16)data);
2167                if (ret_val)
2168                        goto release;
2169        }
2170
2171        /* Configure LCD from extended configuration region. */
2172
2173        /* cnf_base_addr is in DWORD */
2174        word_addr = (u16)(cnf_base_addr << 1);
2175
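            /* Each extended configuration entry in NVM is a word pair:
             * the register data word followed by the PHY register address.
             */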
2176        for (i = 0; i < cnf_size; i++) {
2177                ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
2178                if (ret_val)
2179                        goto release;
2180
2181                ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
2182                                         1, &reg_addr);
2183                if (ret_val)
2184                        goto release;
2185
2186                /* Save off the PHY page for future writes. */
2187                if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2188                        phy_page = reg_data;
2189                        continue;
2190                }
2191
2192                reg_addr &= PHY_REG_MASK;
2193                reg_addr |= phy_page;
2194
2195                ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
2196                if (ret_val)
2197                        goto release;
2198        }
2199
2200release:
2201        hw->phy.ops.release(hw);
2202        return ret_val;
2203}
2204
2205/**
2206 *  e1000_k1_gig_workaround_hv - K1 Si workaround
2207 *  @hw:   pointer to the HW structure
2208 *  @link: link up bool flag
2209 *
2210 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2211 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2212 *  If link is down, the function will restore the default K1 setting located
2213 *  in the NVM.
2214 **/
2215static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2216{
2217        s32 ret_val = 0;
2218        u16 status_reg = 0;
2219        bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2220
2221        if (hw->mac.type != e1000_pchlan)
2222                return 0;
2223
2224        /* Wrap the whole flow with the sw flag */
2225        ret_val = hw->phy.ops.acquire(hw);
2226        if (ret_val)
2227                return ret_val;
2228
2229        /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2230        if (link) {
2231                if (hw->phy.type == e1000_phy_82578) {
2232                        ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
2233                                                  &status_reg);
2234                        if (ret_val)
2235                                goto release;
2236
2237                        status_reg &= (BM_CS_STATUS_LINK_UP |
2238                                       BM_CS_STATUS_RESOLVED |
2239                                       BM_CS_STATUS_SPEED_MASK);
2240
2241                        if (status_reg == (BM_CS_STATUS_LINK_UP |
2242                                           BM_CS_STATUS_RESOLVED |
2243                                           BM_CS_STATUS_SPEED_1000))
2244                                k1_enable = false;
2245                }
2246
2247                if (hw->phy.type == e1000_phy_82577) {
2248                        ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
2249                        if (ret_val)
2250                                goto release;
2251
2252                        status_reg &= (HV_M_STATUS_LINK_UP |
2253                                       HV_M_STATUS_AUTONEG_COMPLETE |
2254                                       HV_M_STATUS_SPEED_MASK);
2255
2256                        if (status_reg == (HV_M_STATUS_LINK_UP |
2257                                           HV_M_STATUS_AUTONEG_COMPLETE |
2258                                           HV_M_STATUS_SPEED_1000))
2259                                k1_enable = false;
2260                }
2261
2262                /* Link stall fix for link up */
2263                ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
2264                if (ret_val)
2265                        goto release;
2266
2267        } else {
2268                /* Link stall fix for link down */
2269                ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
2270                if (ret_val)
2271                        goto release;
2272        }
2273
2274        ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2275
2276release:
2277        hw->phy.ops.release(hw);
2278
2279        return ret_val;
2280}
2281
2282/**
2283 *  e1000_configure_k1_ich8lan - Configure K1 power state
2284 *  @hw: pointer to the HW structure
2285 *  @k1_enable: K1 state to configure
2286 *
2287 *  Configure the K1 power state based on the provided parameter.
2288 *  Assumes semaphore already acquired.
2289 *
2290 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2291 **/
2292s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2293{
2294        s32 ret_val;
2295        u32 ctrl_reg = 0;
2296        u32 ctrl_ext = 0;
2297        u32 reg = 0;
2298        u16 kmrn_reg = 0;
2299
2300        ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2301                                              &kmrn_reg);
2302        if (ret_val)
2303                return ret_val;
2304
2305        if (k1_enable)
2306                kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2307        else
2308                kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2309
2310        ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2311                                               kmrn_reg);
2312        if (ret_val)
2313                return ret_val;
2314
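            /* Briefly force the MAC speed with speed bypass set, presumably
             * so the new K1 setting takes effect, then restore the original
             * CTRL and CTRL_EXT values.
             */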
2315        usleep_range(20, 40);
2316        ctrl_ext = er32(CTRL_EXT);
2317        ctrl_reg = er32(CTRL);
2318
2319        reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2320        reg |= E1000_CTRL_FRCSPD;
2321        ew32(CTRL, reg);
2322
2323        ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2324        e1e_flush();
2325        usleep_range(20, 40);
2326        ew32(CTRL, ctrl_reg);
2327        ew32(CTRL_EXT, ctrl_ext);
2328        e1e_flush();
2329        usleep_range(20, 40);
2330
2331        return 0;
2332}
2333
2334/**
2335 *  e1000_oem_bits_config_ich8lan - SW-based OEM bits configuration
2336 *  @hw:       pointer to the HW structure
2337 *  @d0_state: boolean if entering d0 or d3 device state
2338 *
2339 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2340 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2341 *  in NVM determines whether HW should configure LPLU and Gbe Disable.
2342 **/
2343static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2344{
2345        s32 ret_val = 0;
2346        u32 mac_reg;
2347        u16 oem_reg;
2348
2349        if (hw->mac.type < e1000_pchlan)
2350                return ret_val;
2351
2352        ret_val = hw->phy.ops.acquire(hw);
2353        if (ret_val)
2354                return ret_val;
2355
2356        if (hw->mac.type == e1000_pchlan) {
2357                mac_reg = er32(EXTCNF_CTRL);
2358                if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2359                        goto release;
2360        }
2361
2362        mac_reg = er32(FEXTNVM);
2363        if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2364                goto release;
2365
2366        mac_reg = er32(PHY_CTRL);
2367
2368        ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
2369        if (ret_val)
2370                goto release;
2371
2372        oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2373
2374        if (d0_state) {
2375                if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2376                        oem_reg |= HV_OEM_BITS_GBE_DIS;
2377
2378                if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2379                        oem_reg |= HV_OEM_BITS_LPLU;
2380        } else {
2381                if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2382                               E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2383                        oem_reg |= HV_OEM_BITS_GBE_DIS;
2384
2385                if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2386                               E1000_PHY_CTRL_NOND0A_LPLU))
2387                        oem_reg |= HV_OEM_BITS_LPLU;
2388        }
2389
2390        /* Set Restart auto-neg to activate the bits */
2391        if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2392            !hw->phy.ops.check_reset_block(hw))
2393                oem_reg |= HV_OEM_BITS_RESTART_AN;
2394
2395        ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
2396
2397release:
2398        hw->phy.ops.release(hw);
2399
2400        return ret_val;
2401}
2402
2403/**
2404 *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2405 *  @hw:   pointer to the HW structure
2406 **/
2407static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2408{
2409        s32 ret_val;
2410        u16 data;
2411
2412        ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
2413        if (ret_val)
2414                return ret_val;
2415
2416        data |= HV_KMRN_MDIO_SLOW;
2417
2418        ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
2419
2420        return ret_val;
2421}
2422
2423/**
2424 *  e1000_hv_phy_workarounds_ich8lan - apply PHY workarounds
2425 *  @hw: pointer to the HW structure
2426 *
2427 *  A series of PHY workarounds to be done after every PHY reset.
2428 **/
2429static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2430{
2431        s32 ret_val = 0;
2432        u16 phy_data;
2433
2434        if (hw->mac.type != e1000_pchlan)
2435                return 0;
2436
2437        /* Set MDIO slow mode before any other MDIO access */
2438        if (hw->phy.type == e1000_phy_82577) {
2439                ret_val = e1000_set_mdio_slow_mode_hv(hw);
2440                if (ret_val)
2441                        return ret_val;
2442        }
2443
2444        if (((hw->phy.type == e1000_phy_82577) &&
2445             ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2446            ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2447                /* Disable generation of early preamble */
2448                ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
2449                if (ret_val)
2450                        return ret_val;
2451
2452                /* Preamble tuning for SSC */
2453                ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
2454                if (ret_val)
2455                        return ret_val;
2456        }
2457
2458        if (hw->phy.type == e1000_phy_82578) {
2459                /* Return registers to default by doing a soft reset then
2460                 * writing 0x3140 to the control register.
2461                 */
2462                if (hw->phy.revision < 2) {
2463                        e1000e_phy_sw_reset(hw);
2464                        ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
2465                        if (ret_val)
2466                                return ret_val;
2467                }
2468        }
2469
2470        /* Select page 0 */
2471        ret_val = hw->phy.ops.acquire(hw);
2472        if (ret_val)
2473                return ret_val;
2474
2475        hw->phy.addr = 1;
2476        ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2477        hw->phy.ops.release(hw);
2478        if (ret_val)
2479                return ret_val;
2480
2481        /* Configure the K1 Si workaround during phy reset assuming there is
2482         * link so that it disables K1 if the link is at 1Gbps.
2483         */
2484        ret_val = e1000_k1_gig_workaround_hv(hw, true);
2485        if (ret_val)
2486                return ret_val;
2487
2488        /* Workaround for link disconnects on a busy hub in half duplex */
2489        ret_val = hw->phy.ops.acquire(hw);
2490        if (ret_val)
2491                return ret_val;
2492        ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2493        if (ret_val)
2494                goto release;
2495        ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
2496        if (ret_val)
2497                goto release;
2498
2499        /* set MSE higher to enable link to stay up when noise is high */
2500        ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2501release:
2502        hw->phy.ops.release(hw);
2503
2504        return ret_val;
2505}
2506
2507/**
2508 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2509 *  @hw:   pointer to the HW structure
2510 **/
2511void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2512{
2513        u32 mac_reg;
2514        u16 i, phy_reg = 0;
2515        s32 ret_val;
2516
2517        ret_val = hw->phy.ops.acquire(hw);
2518        if (ret_val)
2519                return;
2520        ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2521        if (ret_val)
2522                goto release;
2523
2524        /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2525        for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2526                mac_reg = er32(RAL(i));
2527                hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2528                                           (u16)(mac_reg & 0xFFFF));
2529                hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2530                                           (u16)((mac_reg >> 16) & 0xFFFF));
2531
2532                mac_reg = er32(RAH(i));
2533                hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2534                                           (u16)(mac_reg & 0xFFFF));
2535                hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2536                                           (u16)((mac_reg & E1000_RAH_AV)
2537                                                 >> 16));
2538        }
2539
2540        e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2541
2542release:
2543        hw->phy.ops.release(hw);
2544}
2545
2546/**
2547 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2548 *  with 82579 PHY
2549 *  @hw: pointer to the HW structure
2550 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2551 **/
2552s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2553{
2554        s32 ret_val = 0;
2555        u16 phy_reg, data;
2556        u32 mac_reg;
2557        u16 i;
2558
2559        if (hw->mac.type < e1000_pch2lan)
2560                return 0;
2561
2562        /* disable Rx path while enabling/disabling workaround */
2563        e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
2564        ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14));
2565        if (ret_val)
2566                return ret_val;
2567
2568        if (enable) {
2569                /* Write Rx addresses (rar_entry_count for RAL/H, and
2570                 * SHRAL/H) and initial CRC values to the MAC
2571                 */
2572                for (i = 0; i < hw->mac.rar_entry_count; i++) {
2573                        u8 mac_addr[ETH_ALEN] = { 0 };
2574                        u32 addr_high, addr_low;
2575
2576                        addr_high = er32(RAH(i));
2577                        if (!(addr_high & E1000_RAH_AV))
2578                                continue;
2579                        addr_low = er32(RAL(i));
2580                        mac_addr[0] = (addr_low & 0xFF);
2581                        mac_addr[1] = ((addr_low >> 8) & 0xFF);
2582                        mac_addr[2] = ((addr_low >> 16) & 0xFF);
2583                        mac_addr[3] = ((addr_low >> 24) & 0xFF);
2584                        mac_addr[4] = (addr_high & 0xFF);
2585                        mac_addr[5] = ((addr_high >> 8) & 0xFF);
2586
2587                        ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
2588                }
2589
2590                /* Write Rx addresses to the PHY */
2591                e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2592
2593                /* Enable jumbo frame workaround in the MAC */
2594                mac_reg = er32(FFLT_DBG);
2595                mac_reg &= ~BIT(14);
2596                mac_reg |= (7 << 15);
2597                ew32(FFLT_DBG, mac_reg);
2598
2599                mac_reg = er32(RCTL);
2600                mac_reg |= E1000_RCTL_SECRC;
2601                ew32(RCTL, mac_reg);
2602
2603                ret_val = e1000e_read_kmrn_reg(hw,
2604                                               E1000_KMRNCTRLSTA_CTRL_OFFSET,
2605                                               &data);
2606                if (ret_val)
2607                        return ret_val;
2608                ret_val = e1000e_write_kmrn_reg(hw,
2609                                                E1000_KMRNCTRLSTA_CTRL_OFFSET,
2610                                                data | BIT(0));
2611                if (ret_val)
2612                        return ret_val;
2613                ret_val = e1000e_read_kmrn_reg(hw,
2614                                               E1000_KMRNCTRLSTA_HD_CTRL,
2615                                               &data);
2616                if (ret_val)
2617                        return ret_val;
2618                data &= ~(0xF << 8);
2619                data |= (0xB << 8);
2620                ret_val = e1000e_write_kmrn_reg(hw,
2621                                                E1000_KMRNCTRLSTA_HD_CTRL,
2622                                                data);
2623                if (ret_val)
2624                        return ret_val;
2625
2626                /* Enable jumbo frame workaround in the PHY */
2627                e1e_rphy(hw, PHY_REG(769, 23), &data);
2628                data &= ~(0x7F << 5);
2629                data |= (0x37 << 5);
2630                ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
2631                if (ret_val)
2632                        return ret_val;
2633                e1e_rphy(hw, PHY_REG(769, 16), &data);
2634                data &= ~BIT(13);
2635                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
2636                if (ret_val)
2637                        return ret_val;
2638                e1e_rphy(hw, PHY_REG(776, 20), &data);
2639                data &= ~(0x3FF << 2);
2640                data |= (E1000_TX_PTR_GAP << 2);
2641                ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
2642                if (ret_val)
2643                        return ret_val;
2644                ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
2645                if (ret_val)
2646                        return ret_val;
2647                e1e_rphy(hw, HV_PM_CTRL, &data);
2648                ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10));
2649                if (ret_val)
2650                        return ret_val;
2651        } else {
2652                /* Write MAC register values back to h/w defaults */
2653                mac_reg = er32(FFLT_DBG);
2654                mac_reg &= ~(0xF << 14);
2655                ew32(FFLT_DBG, mac_reg);
2656
2657                mac_reg = er32(RCTL);
2658                mac_reg &= ~E1000_RCTL_SECRC;
2659                ew32(RCTL, mac_reg);
2660
2661                ret_val = e1000e_read_kmrn_reg(hw,
2662                                               E1000_KMRNCTRLSTA_CTRL_OFFSET,
2663                                               &data);
2664                if (ret_val)
2665                        return ret_val;
2666                ret_val = e1000e_write_kmrn_reg(hw,
2667                                                E1000_KMRNCTRLSTA_CTRL_OFFSET,
2668                                                data & ~BIT(0));
2669                if (ret_val)
2670                        return ret_val;
2671                ret_val = e1000e_read_kmrn_reg(hw,
2672                                               E1000_KMRNCTRLSTA_HD_CTRL,
2673                                               &data);
2674                if (ret_val)
2675                        return ret_val;
2676                data &= ~(0xF << 8);
2677                data |= (0xB << 8);
2678                ret_val = e1000e_write_kmrn_reg(hw,
2679                                                E1000_KMRNCTRLSTA_HD_CTRL,
2680                                                data);
2681                if (ret_val)
2682                        return ret_val;
2683
2684                /* Write PHY register values back to h/w defaults */
2685                e1e_rphy(hw, PHY_REG(769, 23), &data);
2686                data &= ~(0x7F << 5);
2687                ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
2688                if (ret_val)
2689                        return ret_val;
2690                e1e_rphy(hw, PHY_REG(769, 16), &data);
2691                data |= BIT(13);
2692                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
2693                if (ret_val)
2694                        return ret_val;
2695                e1e_rphy(hw, PHY_REG(776, 20), &data);
2696                data &= ~(0x3FF << 2);
2697                data |= (0x8 << 2);
2698                ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
2699                if (ret_val)
2700                        return ret_val;
2701                ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
2702                if (ret_val)
2703                        return ret_val;
2704                e1e_rphy(hw, HV_PM_CTRL, &data);
2705                ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10));
2706                if (ret_val)
2707                        return ret_val;
2708        }
2709
2710        /* re-enable Rx path after enabling/disabling workaround */
2711        return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14));
2712}
2713
2714/**
2715 *  e1000_lv_phy_workarounds_ich8lan - apply 82579 (pch2lan) specific workarounds
2716 *  @hw: pointer to the HW structure
2717 *
2718 *  A series of PHY workarounds to be done after every PHY reset.
2719 **/
2720static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2721{
2722        s32 ret_val = 0;
2723
2724        if (hw->mac.type != e1000_pch2lan)
2725                return 0;
2726
2727        /* Set MDIO slow mode before any other MDIO access */
2728        ret_val = e1000_set_mdio_slow_mode_hv(hw);
2729        if (ret_val)
2730                return ret_val;
2731
2732        ret_val = hw->phy.ops.acquire(hw);
2733        if (ret_val)
2734                return ret_val;
2735        /* set MSE higher to enable link to stay up when noise is high */
2736        ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2737        if (ret_val)
2738                goto release;
2739        /* drop link after 5 times MSE threshold was reached */
2740        ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2741release:
2742        hw->phy.ops.release(hw);
2743
2744        return ret_val;
2745}
2746
2747/**
2748 *  e1000_k1_workaround_lv - K1 Si workaround
2749 *  @hw:   pointer to the HW structure
2750 *
2751 *  Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
2752 *  Disable K1 at 1000Mbps and 100Mbps.
2753 **/
2754static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2755{
2756        s32 ret_val = 0;
2757        u16 status_reg = 0;
2758
2759        if (hw->mac.type != e1000_pch2lan)
2760                return 0;
2761
2762        /* Set K1 beacon duration based on 10Mbps speed */
2763        ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
2764        if (ret_val)
2765                return ret_val;
2766
2767        if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2768            == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2769                if (status_reg &
2770                    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2771                        u16 pm_phy_reg;
2772
2773                        /* LV 1G/100 packet drop issue workaround */
2774                        ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2775                        if (ret_val)
2776                                return ret_val;
2777                        pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2778                        ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2779                        if (ret_val)
2780                                return ret_val;
2781                } else {
2782                        u32 mac_reg;
2783
2784                        mac_reg = er32(FEXTNVM4);
2785                        mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2786                        mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2787                        ew32(FEXTNVM4, mac_reg);
2788                }
2789        }
2790
2791        return ret_val;
2792}
2793
2794/**
2795 *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2796 *  @hw:   pointer to the HW structure
2797 *  @gate: boolean set to true to gate, false to ungate
2798 *
2799 *  Gate/ungate the automatic PHY configuration via hardware; perform
2800 *  the configuration via software instead.
2801 **/
2802static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2803{
2804        u32 extcnf_ctrl;
2805
2806        if (hw->mac.type < e1000_pch2lan)
2807                return;
2808
2809        extcnf_ctrl = er32(EXTCNF_CTRL);
2810
2811        if (gate)
2812                extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2813        else
2814                extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2815
2816        ew32(EXTCNF_CTRL, extcnf_ctrl);
2817}
2818
2819/**
2820 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2821 *  @hw: pointer to the HW structure
2822 *
2823 *  Check the appropriate indication that the MAC has finished configuring the
2824 *  PHY after a software reset.
2825 **/
2826static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2827{
2828        u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2829
2830        /* Wait for basic configuration to complete before proceeding */
2831        do {
2832                data = er32(STATUS);
2833                data &= E1000_STATUS_LAN_INIT_DONE;
2834                usleep_range(100, 200);
2835        } while ((!data) && --loop);
2836
2837        /* If basic configuration is incomplete before the above loop
2838         * count reaches 0, loading the configuration from NVM will
2839         * leave the PHY in a bad state possibly resulting in no link.
2840         */
2841        if (loop == 0)
2842                e_dbg("LAN_INIT_DONE not set, increase timeout\n");
2843
2844        /* Clear the Init Done bit for the next init event */
2845        data = er32(STATUS);
2846        data &= ~E1000_STATUS_LAN_INIT_DONE;
2847        ew32(STATUS, data);
2848}
2849
2850/**
2851 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2852 *  @hw: pointer to the HW structure
2853 **/
2854static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2855{
2856        s32 ret_val = 0;
2857        u16 reg;
2858
2859        if (hw->phy.ops.check_reset_block(hw))
2860                return 0;
2861
2862        /* Allow time for h/w to get to quiescent state after reset */
2863        usleep_range(10000, 11000);
2864
2865        /* Perform any necessary post-reset workarounds */
2866        switch (hw->mac.type) {
2867        case e1000_pchlan:
2868                ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2869                if (ret_val)
2870                        return ret_val;
2871                break;
2872        case e1000_pch2lan:
2873                ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2874                if (ret_val)
2875                        return ret_val;
2876                break;
2877        default:
2878                break;
2879        }
2880
2881        /* Clear the host wakeup bit after LCD reset */
2882        if (hw->mac.type >= e1000_pchlan) {
2883                e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
2884                reg &= ~BM_WUC_HOST_WU_BIT;
2885                e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
2886        }
2887
2888        /* Configure the LCD with the extended configuration region in NVM */
2889        ret_val = e1000_sw_lcd_config_ich8lan(hw);
2890        if (ret_val)
2891                return ret_val;
2892
2893        /* Configure the LCD with the OEM bits in NVM */
2894        ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2895
2896        if (hw->mac.type == e1000_pch2lan) {
2897                /* Ungate automatic PHY configuration on non-managed 82579 */
2898                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
2899                        usleep_range(10000, 11000);
2900                        e1000_gate_hw_phy_config_ich8lan(hw, false);
2901                }
2902
2903                /* Set EEE LPI Update Timer to 200usec */
2904                ret_val = hw->phy.ops.acquire(hw);
2905                if (ret_val)
2906                        return ret_val;
2907                ret_val = e1000_write_emi_reg_locked(hw,
2908                                                     I82579_LPI_UPDATE_TIMER,
2909                                                     0x1387);
2910                hw->phy.ops.release(hw);
2911        }
2912
2913        return ret_val;
2914}
2915
2916/**
2917 *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2918 *  @hw: pointer to the HW structure
2919 *
2920 *  Resets the PHY
2921 *  This is a function pointer entry point called by drivers
2922 *  or other shared routines.
2923 **/
2924static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2925{
2926        s32 ret_val = 0;
2927
2928        /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2929        if ((hw->mac.type == e1000_pch2lan) &&
2930            !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
2931                e1000_gate_hw_phy_config_ich8lan(hw, true);
2932
2933        ret_val = e1000e_phy_hw_reset_generic(hw);
2934        if (ret_val)
2935                return ret_val;
2936
2937        return e1000_post_phy_reset_ich8lan(hw);
2938}
2939
2940/**
2941 *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2942 *  @hw: pointer to the HW structure
2943 *  @active: true to enable LPLU, false to disable
2944 *
2945 *  Sets the LPLU state according to the active flag.  For PCH, if the OEM write
2946 *  bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
2947 *  the PHY speed. This function will manually set the LPLU bit and restart
2948 *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2949 *  since it configures the same bit.
2950 **/
2951static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2952{
2953        s32 ret_val;
2954        u16 oem_reg;
2955
2956        ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
2957        if (ret_val)
2958                return ret_val;
2959
2960        if (active)
2961                oem_reg |= HV_OEM_BITS_LPLU;
2962        else
2963                oem_reg &= ~HV_OEM_BITS_LPLU;
2964
2965        if (!hw->phy.ops.check_reset_block(hw))
2966                oem_reg |= HV_OEM_BITS_RESTART_AN;
2967
2968        return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
2969}
2970
2971/**
2972 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2973 *  @hw: pointer to the HW structure
2974 *  @active: true to enable LPLU, false to disable
2975 *
2976 *  Sets the LPLU D0 state according to the active flag.  When
2977 *  activating LPLU this function also disables smart speed
2978 *  and vice versa.  LPLU will not be activated unless the
2979 *  device's autonegotiation advertisement is 10, 10/100, or
2980 *  10/100/1000 Mb/s at all duplexes.
2981 *  This is a function pointer entry point only called by
2982 *  PHY setup routines.
2983 **/
2984static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2985{
2986        struct e1000_phy_info *phy = &hw->phy;
2987        u32 phy_ctrl;
2988        s32 ret_val = 0;
2989        u16 data;
2990
2991        if (phy->type == e1000_phy_ife)
2992                return 0;
2993
2994        phy_ctrl = er32(PHY_CTRL);
2995
2996        if (active) {
2997                phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2998                ew32(PHY_CTRL, phy_ctrl);
2999
3000                if (phy->type != e1000_phy_igp_3)
3001                        return 0;
3002
3003                /* Call gig speed drop workaround on LPLU before accessing
3004                 * any PHY registers
3005                 */
3006                if (hw->mac.type == e1000_ich8lan)
3007                        e1000e_gig_downshift_workaround_ich8lan(hw);
3008
3009                /* When LPLU is enabled, we should disable SmartSpeed */
3010                ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
3011                if (ret_val)
3012                        return ret_val;
3013                data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3014                ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
3015                if (ret_val)
3016                        return ret_val;
3017        } else {
3018                phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3019                ew32(PHY_CTRL, phy_ctrl);
3020
3021                if (phy->type != e1000_phy_igp_3)
3022                        return 0;
3023
3024                /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3025                 * during Dx states where the power conservation is most
3026                 * important.  During driver activity we should enable
3027                 * SmartSpeed, so performance is maintained.
3028                 */
3029                if (phy->smart_speed == e1000_smart_speed_on) {
3030                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3031                                           &data);
3032                        if (ret_val)
3033                                return ret_val;
3034
3035                        data |= IGP01E1000_PSCFR_SMART_SPEED;
3036                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3037                                           data);
3038                        if (ret_val)
3039                                return ret_val;
3040                } else if (phy->smart_speed == e1000_smart_speed_off) {
3041                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3042                                           &data);
3043                        if (ret_val)
3044                                return ret_val;
3045
3046                        data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3047                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3048                                           data);
3049                        if (ret_val)
3050                                return ret_val;
3051                }
3052        }
3053
3054        return 0;
3055}
3056
3057/**
3058 *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3059 *  @hw: pointer to the HW structure
3060 *  @active: true to enable LPLU, false to disable
3061 *
3062 *  Sets the LPLU D3 state according to the active flag.  When
3063 *  activating LPLU this function also disables smart speed
3064 *  and vice versa.  LPLU will not be activated unless the
3065 *  device's autonegotiation advertisement is 10, 10/100, or
3066 *  10/100/1000 Mb/s at all duplexes.
3067 *  This is a function pointer entry point only called by
3068 *  PHY setup routines.
3069 **/
3070static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3071{
3072        struct e1000_phy_info *phy = &hw->phy;
3073        u32 phy_ctrl;
3074        s32 ret_val = 0;
3075        u16 data;
3076
3077        phy_ctrl = er32(PHY_CTRL);
3078
3079        if (!active) {
3080                phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3081                ew32(PHY_CTRL, phy_ctrl);
3082
3083                if (phy->type != e1000_phy_igp_3)
3084                        return 0;
3085
3086                /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3087                 * during Dx states where the power conservation is most
3088                 * important.  During driver activity we should enable
3089                 * SmartSpeed, so performance is maintained.
3090                 */
3091                if (phy->smart_speed == e1000_smart_speed_on) {
3092                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3093                                           &data);
3094                        if (ret_val)
3095                                return ret_val;
3096
3097                        data |= IGP01E1000_PSCFR_SMART_SPEED;
3098                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3099                                           data);
3100                        if (ret_val)
3101                                return ret_val;
3102                } else if (phy->smart_speed == e1000_smart_speed_off) {
3103                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3104                                           &data);
3105                        if (ret_val)
3106                                return ret_val;
3107
3108                        data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3109                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3110                                           data);
3111                        if (ret_val)
3112                                return ret_val;
3113                }
3114        } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3115                   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3116                   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3117                phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3118                ew32(PHY_CTRL, phy_ctrl);
3119
3120                if (phy->type != e1000_phy_igp_3)
3121                        return 0;
3122
3123                /* Call gig speed drop workaround on LPLU before accessing
3124                 * any PHY registers
3125                 */
3126                if (hw->mac.type == e1000_ich8lan)
3127                        e1000e_gig_downshift_workaround_ich8lan(hw);
3128
3129                /* When LPLU is enabled, we should disable SmartSpeed */
3130                ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
3131                if (ret_val)
3132                        return ret_val;
3133
3134                data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3135                ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
3136        }
3137
3138        return ret_val;
3139}
3140
3141/**
3142 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3143 *  @hw: pointer to the HW structure
3144 *  @bank:  pointer to the variable that returns the active bank
3145 *
3146 *  Reads signature byte from the NVM using the flash access registers.
3147 *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3148 **/
3149static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3150{
3151        u32 eecd;
3152        struct e1000_nvm_info *nvm = &hw->nvm;
3153        u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3154        u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3155        u32 nvm_dword = 0;
3156        u8 sig_byte = 0;
3157        s32 ret_val;
3158
3159        switch (hw->mac.type) {
3160        case e1000_pch_spt:
3161        case e1000_pch_cnp:
3162        case e1000_pch_tgp:
3163        case e1000_pch_adp:
3164        case e1000_pch_mtp:
3165                bank1_offset = nvm->flash_bank_size;
3166                act_offset = E1000_ICH_NVM_SIG_WORD;
3167
3168                /* set bank to 0 in case flash read fails */
3169                *bank = 0;
3170
3171                /* Check bank 0 */
3172                ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3173                                                         &nvm_dword);
3174                if (ret_val)
3175                        return ret_val;
3176                sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3177                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3178                    E1000_ICH_NVM_SIG_VALUE) {
3179                        *bank = 0;
3180                        return 0;
3181                }
3182
3183                /* Check bank 1 */
3184                ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3185                                                         bank1_offset,
3186                                                         &nvm_dword);
3187                if (ret_val)
3188                        return ret_val;
3189                sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3190                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3191                    E1000_ICH_NVM_SIG_VALUE) {
3192                        *bank = 1;
3193                        return 0;
3194                }
3195
3196                e_dbg("ERROR: No valid NVM bank present\n");
3197                return -E1000_ERR_NVM;
3198        case e1000_ich8lan:
3199        case e1000_ich9lan:
3200                eecd = er32(EECD);
3201                if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3202                    E1000_EECD_SEC1VAL_VALID_MASK) {
3203                        if (eecd & E1000_EECD_SEC1VAL)
3204                                *bank = 1;
3205                        else
3206                                *bank = 0;
3207
3208                        return 0;
3209                }
3210                e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3211                fallthrough;
3212        default:
3213                /* set bank to 0 in case flash read fails */
3214                *bank = 0;
3215
3216                /* Check bank 0 */
3217                ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3218                                                        &sig_byte);
3219                if (ret_val)
3220                        return ret_val;
3221                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3222                    E1000_ICH_NVM_SIG_VALUE) {
3223                        *bank = 0;
3224                        return 0;
3225                }
3226
3227                /* Check bank 1 */
3228                ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3229                                                        bank1_offset,
3230                                                        &sig_byte);
3231                if (ret_val)
3232                        return ret_val;
3233                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3234                    E1000_ICH_NVM_SIG_VALUE) {
3235                        *bank = 1;
3236                        return 0;
3237                }
3238
3239                e_dbg("ERROR: No valid NVM bank present\n");
3240                return -E1000_ERR_NVM;
3241        }
3242}
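
/*
 * Illustrative sketch (not part of the driver): the bank detection above
 * reduces to testing bits 15:14 of NVM word 0x13.  Assuming the usual
 * ich8lan.h definitions (E1000_ICH_NVM_VALID_SIG_MASK covering those two
 * bits of the signature byte and E1000_ICH_NVM_SIG_VALUE encoding 10b),
 * a standalone check of the high byte of word 0x13 could look like this:
 */
#if 0	/* example only, never compiled */
static bool ich8_nvm_sig_valid(u8 sig_byte)
{
        /* bits 15:14 of word 0x13 == 10b mark the bank as valid */
        return (sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
               E1000_ICH_NVM_SIG_VALUE;
}
#endif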
3243
3244/**
3245 *  e1000_read_nvm_spt - NVM access for SPT
3246 *  @hw: pointer to the HW structure
3247 *  @offset: The offset (in words) of the word(s) to read.
3248 *  @words: Size of data to read in words.
3249 *  @data: pointer to the word(s) to read at offset.
3250 *
3251 *  Reads a word(s) from the NVM
3252 **/
3253static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3254                              u16 *data)
3255{
3256        struct e1000_nvm_info *nvm = &hw->nvm;
3257        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3258        u32 act_offset;
3259        s32 ret_val = 0;
3260        u32 bank = 0;
3261        u32 dword = 0;
3262        u16 offset_to_read;
3263        u16 i;
3264
3265        if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3266            (words == 0)) {
3267                e_dbg("nvm parameter(s) out of bounds\n");
3268                ret_val = -E1000_ERR_NVM;
3269                goto out;
3270        }
3271
3272        nvm->ops.acquire(hw);
3273
3274        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3275        if (ret_val) {
3276                e_dbg("Could not detect valid bank, assuming bank 0\n");
3277                bank = 0;
3278        }
3279
3280        act_offset = (bank) ? nvm->flash_bank_size : 0;
3281        act_offset += offset;
3282
3283        ret_val = 0;
3284
3285        for (i = 0; i < words; i += 2) {
3286                if (words - i == 1) {
3287                        if (dev_spec->shadow_ram[offset + i].modified) {
3288                                data[i] =
3289                                    dev_spec->shadow_ram[offset + i].value;
3290                        } else {
3291                                offset_to_read = act_offset + i -
3292                                    ((act_offset + i) % 2);
3293                                ret_val =
3294                                  e1000_read_flash_dword_ich8lan(hw,
3295                                                                 offset_to_read,
3296                                                                 &dword);
3297                                if (ret_val)
3298                                        break;
3299                                if ((act_offset + i) % 2 == 0)
3300                                        data[i] = (u16)(dword & 0xFFFF);
3301                                else
3302                                        data[i] = (u16)((dword >> 16) & 0xFFFF);
3303                        }
3304                } else {
3305                        offset_to_read = act_offset + i;
3306                        if (!(dev_spec->shadow_ram[offset + i].modified) ||
3307                            !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3308                                ret_val =
3309                                  e1000_read_flash_dword_ich8lan(hw,
3310                                                                 offset_to_read,
3311                                                                 &dword);
3312                                if (ret_val)
3313                                        break;
3314                        }
3315                        if (dev_spec->shadow_ram[offset + i].modified)
3316                                data[i] =
3317                                    dev_spec->shadow_ram[offset + i].value;
3318                        else
3319                                data[i] = (u16)(dword & 0xFFFF);
3320                        if (dev_spec->shadow_ram[offset + i + 1].modified)
3321                                data[i + 1] =
3322                                    dev_spec->shadow_ram[offset + i + 1].value;
3323                        else
3324                                data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
3325                }
3326        }
3327
3328        nvm->ops.release(hw);
3329
3330out:
3331        if (ret_val)
3332                e_dbg("NVM read error: %d\n", ret_val);
3333
3334        return ret_val;
3335}
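
/*
 * Illustrative sketch (not part of the driver): on SPT and later the flash
 * is only read a dword at a time, so the routine above takes a word at an
 * even word offset from the low 16 bits of the containing dword and a word
 * at an odd offset from the high 16 bits.  A hypothetical helper expressing
 * that split:
 */
#if 0	/* example only, never compiled */
static u16 spt_word_from_dword(u32 dword, u32 word_offset)
{
        /* even offsets map to bits 15:0, odd offsets to bits 31:16 */
        return (word_offset & 1) ? (u16)(dword >> 16) : (u16)(dword & 0xFFFF);
}
#endif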
3336
3337/**
3338 *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3339 *  @hw: pointer to the HW structure
3340 *  @offset: The offset (in words) of the word(s) to read.
3341 *  @words: Size of data to read in words
3342 *  @data: Pointer to the word(s) to read at offset.
3343 *
3344 *  Reads a word(s) from the NVM using the flash access registers.
3345 **/
3346static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3347                                  u16 *data)
3348{
3349        struct e1000_nvm_info *nvm = &hw->nvm;
3350        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3351        u32 act_offset;
3352        s32 ret_val = 0;
3353        u32 bank = 0;
3354        u16 i, word;
3355
3356        if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3357            (words == 0)) {
3358                e_dbg("nvm parameter(s) out of bounds\n");
3359                ret_val = -E1000_ERR_NVM;
3360                goto out;
3361        }
3362
3363        nvm->ops.acquire(hw);
3364
3365        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3366        if (ret_val) {
3367                e_dbg("Could not detect valid bank, assuming bank 0\n");
3368                bank = 0;
3369        }
3370
3371        act_offset = (bank) ? nvm->flash_bank_size : 0;
3372        act_offset += offset;
3373
3374        ret_val = 0;
3375        for (i = 0; i < words; i++) {
3376                if (dev_spec->shadow_ram[offset + i].modified) {
3377                        data[i] = dev_spec->shadow_ram[offset + i].value;
3378                } else {
3379                        ret_val = e1000_read_flash_word_ich8lan(hw,
3380                                                                act_offset + i,
3381                                                                &word);
3382                        if (ret_val)
3383                                break;
3384                        data[i] = word;
3385                }
3386        }
3387
3388        nvm->ops.release(hw);
3389
3390out:
3391        if (ret_val)
3392                e_dbg("NVM read error: %d\n", ret_val);
3393
3394        return ret_val;
3395}
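
/*
 * Illustrative sketch (not part of the driver): callers normally go through
 * the nvm.ops.read pointer (the e1000_read_nvm() wrapper used elsewhere in
 * this file), which resolves to e1000_read_nvm_ich8lan() or
 * e1000_read_nvm_spt() on these parts.  Reading two words starting at a
 * hypothetical word offset 0x10 might look like this:
 */
#if 0	/* example only, never compiled */
static s32 example_read_two_words(struct e1000_hw *hw, u16 *buf)
{
        /* offset and length are in 16-bit words, not bytes */
        return e1000_read_nvm(hw, 0x10, 2, buf);
}
#endif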
3396
3397/**
3398 *  e1000_flash_cycle_init_ich8lan - Initialize flash
3399 *  @hw: pointer to the HW structure
3400 *
3401 *  This function does initial flash setup so that a new read/write/erase cycle
3402 *  can be started.
3403 **/
3404static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3405{
3406        union ich8_hws_flash_status hsfsts;
3407        s32 ret_val = -E1000_ERR_NVM;
3408
3409        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3410
3411        /* Check if the flash descriptor is valid */
3412        if (!hsfsts.hsf_status.fldesvalid) {
3413                e_dbg("Flash descriptor invalid.  SW Sequencing must be used.\n");
3414                return -E1000_ERR_NVM;
3415        }
3416
3417        /* Clear FCERR and DAEL in hw status by writing 1 */
3418        hsfsts.hsf_status.flcerr = 1;
3419        hsfsts.hsf_status.dael = 1;
3420        if (hw->mac.type >= e1000_pch_spt)
3421                ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3422        else
3423                ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3424
3425        /* Either the hardware provides an SPI cycle-in-progress bit
3426         * that we can check before starting a new cycle, or the
3427         * FDONE bit is set to 1 by the hardware after reset, in
3428         * which case it can be used as an indication of whether a
3429         * flash cycle is currently in progress or has been
3430         * completed.
3431         */
3432
3433        if (!hsfsts.hsf_status.flcinprog) {
3434                /* There is no cycle running at present,
3435                 * so we can start a cycle.
3436                 * Begin by setting Flash Cycle Done.
3437                 */
3438                hsfsts.hsf_status.flcdone = 1;
3439                if (hw->mac.type >= e1000_pch_spt)
3440                        ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3441                else
3442                        ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3443                ret_val = 0;
3444        } else {
3445                s32 i;
3446                /* Otherwise poll for some time so the current
3447                /* Otherwise poll for sometime so the current
3448                 * cycle has a chance to end before giving up.
3449                 */
3450                for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3451                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3452                        if (!hsfsts.hsf_status.flcinprog) {
3453                                ret_val = 0;
3454                                break;
3455                        }
3456                        udelay(1);
3457                }
3458                if (!ret_val) {
3459                        /* The previous cycle completed within the timeout,
3460                         * now set the Flash Cycle Done.
3461                         */
3462                        hsfsts.hsf_status.flcdone = 1;
3463                        if (hw->mac.type >= e1000_pch_spt)
3464                                ew32flash(ICH_FLASH_HSFSTS,
3465                                          hsfsts.regval & 0xFFFF);
3466                        else
3467                                ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3468                } else {
3469                        e_dbg("Flash controller busy, cannot get access\n");
3470                }
3471        }
3472
3473        return ret_val;
3474}
3475
3476/**
3477 *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3478 *  @hw: pointer to the HW structure
3479 *  @timeout: maximum time to wait for completion
3480 *
3481 *  This function starts a flash cycle and waits for its completion.
3482 **/
3483static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3484{
3485        union ich8_hws_flash_ctrl hsflctl;
3486        union ich8_hws_flash_status hsfsts;
3487        u32 i = 0;
3488
3489        /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3490        if (hw->mac.type >= e1000_pch_spt)
3491                hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
3492        else
3493                hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3494        hsflctl.hsf_ctrl.flcgo = 1;
3495
3496        if (hw->mac.type >= e1000_pch_spt)
3497                ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
3498        else
3499                ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3500
3501        /* wait till FDONE bit is set to 1 */
3502        do {
3503                hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3504                if (hsfsts.hsf_status.flcdone)
3505                        break;
3506                udelay(1);
3507        } while (i++ < timeout);
3508
3509        if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3510                return 0;
3511
3512        return -E1000_ERR_NVM;
3513}
3514
3515/**
3516 *  e1000_read_flash_dword_ich8lan - Read dword from flash
3517 *  @hw: pointer to the HW structure
3518 *  @offset: offset to data location
3519 *  @data: pointer to the location for storing the data
3520 *
3521 *  Reads the flash dword at offset into data.  Offset is converted
3522 *  to bytes before read.
3523 **/
3524static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3525                                          u32 *data)
3526{
3527        /* Must convert word offset into bytes. */
3528        offset <<= 1;
3529        return e1000_read_flash_data32_ich8lan(hw, offset, data);
3530}
3531
3532/**
3533 *  e1000_read_flash_word_ich8lan - Read word from flash
3534 *  @hw: pointer to the HW structure
3535 *  @offset: offset to data location
3536 *  @data: pointer to the location for storing the data
3537 *
3538 *  Reads the flash word at offset into data.  Offset is converted
3539 *  to bytes before read.
3540 **/
3541static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3542                                         u16 *data)
3543{
3544        /* Must convert offset into bytes. */
3545        offset <<= 1;
3546
3547        return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3548}
3549
3550/**
3551 *  e1000_read_flash_byte_ich8lan - Read byte from flash
3552 *  @hw: pointer to the HW structure
3553 *  @offset: The offset of the byte to read.
3554 *  @data: Pointer to a byte to store the value read.
3555 *
3556 *  Reads a single byte from the NVM using the flash access registers.
3557 **/
3558static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3559                                         u8 *data)
3560{
3561        s32 ret_val;
3562        u16 word = 0;
3563
3564        /* In SPT, only 32-bit access is supported,
3565         * so this function should not be called.
3566         */
3567        if (hw->mac.type >= e1000_pch_spt)
3568                return -E1000_ERR_NVM;
3569        else
3570                ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3571
3572        if (ret_val)
3573                return ret_val;
3574
3575        *data = (u8)word;
3576
3577        return 0;
3578}
3579
3580/**
3581 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3582 *  @hw: pointer to the HW structure
3583 *  @offset: The offset (in bytes) of the byte or word to read.
3584 *  @size: Size of data to read, 1=byte 2=word
3585 *  @data: Pointer to the word to store the value read.
3586 *
3587 *  Reads a byte or word from the NVM using the flash access registers.
3588 **/
3589static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3590                                         u8 size, u16 *data)
3591{
3592        union ich8_hws_flash_status hsfsts;
3593        union ich8_hws_flash_ctrl hsflctl;
3594        u32 flash_linear_addr;
3595        u32 flash_data = 0;
3596        s32 ret_val = -E1000_ERR_NVM;
3597        u8 count = 0;
3598
3599        if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3600                return -E1000_ERR_NVM;
3601
3602        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3603                             hw->nvm.flash_base_addr);
3604
3605        do {
3606                udelay(1);
3607                /* Steps */
3608                ret_val = e1000_flash_cycle_init_ich8lan(hw);
3609                if (ret_val)
3610                        break;
3611
3612                hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3613                /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3614                hsflctl.hsf_ctrl.fldbcount = size - 1;
3615                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3616                ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3617
3618                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3619
3620                ret_val =
3621                    e1000_flash_cycle_ich8lan(hw,
3622                                              ICH_FLASH_READ_COMMAND_TIMEOUT);
3623
3624                /* If FCERR is set, clear it and try the whole
3625                 * sequence a few more times, else read in (shift
3626                 * in) the Flash Data0 register, the order being
3627                 * least significant byte first.
3628                 */
3629                if (!ret_val) {
3630                        flash_data = er32flash(ICH_FLASH_FDATA0);
3631                        if (size == 1)
3632                                *data = (u8)(flash_data & 0x000000FF);
3633                        else if (size == 2)
3634                                *data = (u16)(flash_data & 0x0000FFFF);
3635                        break;
3636                } else {
3637                        /* If we've gotten here, then things are probably
3638                         * completely hosed, but if the error condition is
3639                         * detected, it won't hurt to give it another try...
3640                         * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3641                         */
3642                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3643                        if (hsfsts.hsf_status.flcerr) {
3644                                /* Repeat for some time before giving up. */
3645                                continue;
3646                        } else if (!hsfsts.hsf_status.flcdone) {
3647                                e_dbg("Timeout error - flash cycle did not complete.\n");
3648                                break;
3649                        }
3650                }
3651        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3652
3653        return ret_val;
3654}
3655
3656/**
3657 *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3658 *  @hw: pointer to the HW structure
3659 *  @offset: The offset (in bytes) of the dword to read.
3660 *  @data: Pointer to the dword to store the value read.
3661 *
3662 *  Reads a dword from the NVM using the flash access registers.
3663 **/
3665static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3666                                           u32 *data)
3667{
3668        union ich8_hws_flash_status hsfsts;
3669        union ich8_hws_flash_ctrl hsflctl;
3670        u32 flash_linear_addr;
3671        s32 ret_val = -E1000_ERR_NVM;
3672        u8 count = 0;
3673
3674        if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt)
3675                return -E1000_ERR_NVM;
3676        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3677                             hw->nvm.flash_base_addr);
3678
3679        do {
3680                udelay(1);
3681                /* Steps */
3682                ret_val = e1000_flash_cycle_init_ich8lan(hw);
3683                if (ret_val)
3684                        break;
3685                /* In SPT, this register is in LAN memory space, not flash.
3686                 * Therefore, only 32-bit access is supported.
3687                 */
3688                hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
3689
3690                /* fldbcount of 3 selects a 4-byte (dword) transfer */
3691                hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3692                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3693                /* In SPT, this register is in LAN memory space, not flash.
3694                 * Therefore, only 32-bit access is supported.
3695                 */
3696                ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
3697                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3698
3699                ret_val =
3700                   e1000_flash_cycle_ich8lan(hw,
3701                                             ICH_FLASH_READ_COMMAND_TIMEOUT);
3702
3703                /* If FCERR is set, clear it and try the whole
3704                 * sequence a few more times, else read in (shift
3705                 * in) the Flash Data0 register, the order being
3706                 * least significant byte first.
3707                 */
3708                if (!ret_val) {
3709                        *data = er32flash(ICH_FLASH_FDATA0);
3710                        break;
3711                } else {
3712                        /* If we've gotten here, then things are probably
3713                         * completely hosed, but if the error condition is
3714                         * detected, it won't hurt to give it another try...
3715                         * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3716                         */
3717                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3718                        if (hsfsts.hsf_status.flcerr) {
3719                                /* Repeat for some time before giving up. */
3720                                continue;
3721                        } else if (!hsfsts.hsf_status.flcdone) {
3722                                e_dbg("Timeout error - flash cycle did not complete.\n");
3723                                break;
3724                        }
3725                }
3726        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3727
3728        return ret_val;
3729}
3730
3731/**
3732 *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3733 *  @hw: pointer to the HW structure
3734 *  @offset: The offset (in words) of the word(s) to write.
3735 *  @words: Size of data to write in words
3736 *  @data: Pointer to the word(s) to write at offset.
3737 *
3738 *  Writes word(s) to the shadow RAM; the data is committed to the NVM when the checksum is next updated.
3739 **/
3740static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3741                                   u16 *data)
3742{
3743        struct e1000_nvm_info *nvm = &hw->nvm;
3744        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3745        u16 i;
3746
3747        if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3748            (words == 0)) {
3749                e_dbg("nvm parameter(s) out of bounds\n");
3750                return -E1000_ERR_NVM;
3751        }
3752
3753        nvm->ops.acquire(hw);
3754
3755        for (i = 0; i < words; i++) {
3756                dev_spec->shadow_ram[offset + i].modified = true;
3757                dev_spec->shadow_ram[offset + i].value = data[i];
3758        }
3759
3760        nvm->ops.release(hw);
3761
3762        return 0;
3763}
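
/*
 * Illustrative sketch (not part of the driver): the routine above only
 * updates the shadow RAM; nothing reaches the flash until a checksum update
 * commits the modified words to the spare bank.  A caller therefore pairs
 * the write with e1000e_update_nvm_checksum(), e.g.:
 */
#if 0	/* example only, never compiled */
static s32 example_write_one_word(struct e1000_hw *hw, u16 offset, u16 val)
{
        s32 ret_val;

        /* queue the new value in the shadow RAM */
        ret_val = e1000_write_nvm(hw, offset, 1, &val);
        if (ret_val)
                return ret_val;

        /* recompute the checksum and commit the shadow RAM to the flash */
        return e1000e_update_nvm_checksum(hw);
}
#endif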
3764
3765/**
3766 *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
3767 *  @hw: pointer to the HW structure
3768 *
3769 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3770 *  which writes the checksum to the shadow ram.  The changes in the shadow
3771 *  ram are then committed to the EEPROM by processing each bank at a time
3772 *  checking for the modified bit and writing only the pending changes.
3773 *  After a successful commit, the shadow ram is cleared and is ready for
3774 *  future writes.
3775 **/
3776static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
3777{
3778        struct e1000_nvm_info *nvm = &hw->nvm;
3779        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3780        u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3781        s32 ret_val;
3782        u32 dword = 0;
3783
3784        ret_val = e1000e_update_nvm_checksum_generic(hw);
3785        if (ret_val)
3786                goto out;
3787
3788        if (nvm->type != e1000_nvm_flash_sw)
3789                goto out;
3790
3791        nvm->ops.acquire(hw);
3792
3793        /* We're writing to the opposite bank so if we're on bank 1,
3794         * write to bank 0 etc.  We also need to erase the segment that
3795         * is going to be written
3796         */
3797        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3798        if (ret_val) {
3799                e_dbg("Could not detect valid bank, assuming bank 0\n");
3800                bank = 0;
3801        }
3802
3803        if (bank == 0) {
3804                new_bank_offset = nvm->flash_bank_size;
3805                old_bank_offset = 0;
3806                ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3807                if (ret_val)
3808                        goto release;
3809        } else {
3810                old_bank_offset = nvm->flash_bank_size;
3811                new_bank_offset = 0;
3812                ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3813                if (ret_val)
3814                        goto release;
3815        }
3816        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) {
3817                /* Determine whether to write the value stored
3818                 * in the other NVM bank or a modified value stored
3819                 * in the shadow RAM
3820                 */
3821                ret_val = e1000_read_flash_dword_ich8lan(hw,
3822                                                         i + old_bank_offset,
3823                                                         &dword);
3824
3825                if (dev_spec->shadow_ram[i].modified) {
3826                        dword &= 0xffff0000;
3827                        dword |= (dev_spec->shadow_ram[i].value & 0xffff);
3828                }
3829                if (dev_spec->shadow_ram[i + 1].modified) {
3830                        dword &= 0x0000ffff;
3831                        dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
3832                                  << 16);
3833                }
3834                if (ret_val)
3835                        break;
3836
3837                /* If the word is 0x13, then make sure the signature bits
3838                 * (15:14) are 11b until the commit has completed.
3839                 * This will allow us to write 10b which indicates the
3840                 * signature is valid.  We want to do this after the write
3841                 * has completed so that we don't mark the segment valid
3842                 * while the write is still in progress
3843                 */
3844                if (i == E1000_ICH_NVM_SIG_WORD - 1)
3845                        dword |= E1000_ICH_NVM_SIG_MASK << 16;
3846
3850                usleep_range(100, 200);
3851
3852                /* Write the data to the new bank. Offset in words */
3853                act_offset = i + new_bank_offset;
3854                ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
3855                                                                dword);
3856                if (ret_val)
3857                        break;
3858        }
3859
3860        /* Don't bother writing the segment valid bits if sector
3861         * programming failed.
3862         */
3863        if (ret_val) {
3864                /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
3865                e_dbg("Flash commit failed.\n");
3866                goto release;
3867        }
3868
3869        /* Finally, validate the new segment by setting bits 15:14
3870         * of word 0x13 to 10b.  This can be done without an
3871         * erase because these bits start out as 11b and we only
3872         * need to clear bit 14.
3873         */
3874        act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3875
3876        /* offset is in words but we read a dword */
3877        --act_offset;
3878        ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3879
3880        if (ret_val)
3881                goto release;
3882
3883        dword &= 0xBFFFFFFF;
3884        ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3885
3886        if (ret_val)
3887                goto release;
3888
3889        /* offset in words but we read dword */
3890        act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
3891        ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3892
3893        if (ret_val)
3894                goto release;
3895
3896        dword &= 0x00FFFFFF;
3897        ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3898
3899        if (ret_val)
3900                goto release;
3901
3902        /* Great!  Everything worked, we can now clear the cached entries. */
3903        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
3904                dev_spec->shadow_ram[i].modified = false;
3905                dev_spec->shadow_ram[i].value = 0xFFFF;
3906        }
3907
3908release:
3909        nvm->ops.release(hw);
3910
3911        /* Reload the EEPROM, or else modifications will not appear
3912         * until after the next adapter reset.
3913         */
3914        if (!ret_val) {
3915                nvm->ops.reload(hw);
3916                usleep_range(10000, 11000);
3917        }
3918
3919out:
3920        if (ret_val)
3921                e_dbg("NVM update error: %d\n", ret_val);
3922
3923        return ret_val;
3924}
3925
3926/**
3927 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3928 *  @hw: pointer to the HW structure
3929 *
3930 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3931 *  which writes the checksum to the shadow ram.  The changes in the shadow
3932 *  ram are then committed to the EEPROM by processing each bank at a time
3933 *  checking for the modified bit and writing only the pending changes.
3934 *  After a successful commit, the shadow ram is cleared and is ready for
3935 *  future writes.
3936 **/
3937static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3938{
3939        struct e1000_nvm_info *nvm = &hw->nvm;
3940        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3941        u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3942        s32 ret_val;
3943        u16 data = 0;
3944
3945        ret_val = e1000e_update_nvm_checksum_generic(hw);
3946        if (ret_val)
3947                goto out;
3948
3949        if (nvm->type != e1000_nvm_flash_sw)
3950                goto out;
3951
3952        nvm->ops.acquire(hw);
3953
3954        /* We're writing to the opposite bank so if we're on bank 1,
3955         * write to bank 0 etc.  We also need to erase the segment that
3956         * is going to be written
3957         */
3958        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3959        if (ret_val) {
3960                e_dbg("Could not detect valid bank, assuming bank 0\n");
3961                bank = 0;
3962        }
3963
3964        if (bank == 0) {
3965                new_bank_offset = nvm->flash_bank_size;
3966                old_bank_offset = 0;
3967                ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3968                if (ret_val)
3969                        goto release;
3970        } else {
3971                old_bank_offset = nvm->flash_bank_size;
3972                new_bank_offset = 0;
3973                ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3974                if (ret_val)
3975                        goto release;
3976        }
3977        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
3978                if (dev_spec->shadow_ram[i].modified) {
3979                        data = dev_spec->shadow_ram[i].value;
3980                } else {
3981                        ret_val = e1000_read_flash_word_ich8lan(hw, i +
3982                                                                old_bank_offset,
3983                                                                &data);
3984                        if (ret_val)
3985                                break;
3986                }
3987
3988                /* If the word is 0x13, then make sure the signature bits
3989                 * (15:14) are 11b until the commit has completed.
3990                 * This will allow us to write 10b which indicates the
3991                 * signature is valid.  We want to do this after the write
3992                 * has completed so that we don't mark the segment valid
3993                 * while the write is still in progress
3994                 */
3995                if (i == E1000_ICH_NVM_SIG_WORD)
3996                        data |= E1000_ICH_NVM_SIG_MASK;
3997
3998                /* Convert offset to bytes. */
3999                act_offset = (i + new_bank_offset) << 1;
4000
4001                usleep_range(100, 200);
4002                /* Write the bytes to the new bank. */
4003                ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4004                                                               act_offset,
4005                                                               (u8)data);
4006                if (ret_val)
4007                        break;
4008
4009                usleep_range(100, 200);
4010                ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4011                                                               act_offset + 1,
4012                                                               (u8)(data >> 8));
4013                if (ret_val)
4014                        break;
4015        }
4016
4017        /* Don't bother writing the segment valid bits if sector
4018         * programming failed.
4019         */
4020        if (ret_val) {
4021                /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
4022                e_dbg("Flash commit failed.\n");
4023                goto release;
4024        }
4025
4026        /* Finally, validate the new segment by setting bits 15:14
4027         * of word 0x13 to 10b.  This can be done without an
4028         * erase because these bits start out as 11b and we only
4029         * need to clear bit 14.
4030         */
4031        act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4032        ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4033        if (ret_val)
4034                goto release;
4035
4036        data &= 0xBFFF;
4037        ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4038                                                       act_offset * 2 + 1,
4039                                                       (u8)(data >> 8));
4040        if (ret_val)
4041                goto release;
4042
4043        /* And invalidate the previously valid segment by clearing
4044         * the high byte of its signature word (0x13).  This can be
4045         * done without an erase because a flash erase sets all bits
4046         * to 1's, and 1's can be written to 0's without an erase.
4047         */
4048        act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4049        ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4050        if (ret_val)
4051                goto release;
4052
4053        /* Great!  Everything worked, we can now clear the cached entries. */
4054        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
4055                dev_spec->shadow_ram[i].modified = false;
4056                dev_spec->shadow_ram[i].value = 0xFFFF;
4057        }
4058
4059release:
4060        nvm->ops.release(hw);
4061
4062        /* Reload the EEPROM, or else modifications will not appear
4063         * until after the next adapter reset.
4064         */
4065        if (!ret_val) {
4066                nvm->ops.reload(hw);
4067                usleep_range(10000, 11000);
4068        }
4069
4070out:
4071        if (ret_val)
4072                e_dbg("NVM update error: %d\n", ret_val);
4073
4074        return ret_val;
4075}
4076
4077/**
4078 *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4079 *  @hw: pointer to the HW structure
4080 *
4081 *  Check to see if the checksum needs to be fixed by reading bit 6 in word 0x19.
4082 *  If the bit is 0, the EEPROM has been modified but the checksum was not
4083 *  calculated; in that case we need to calculate the checksum and set bit 6.
4084 **/
4085static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4086{
4087        s32 ret_val;
4088        u16 data;
4089        u16 word;
4090        u16 valid_csum_mask;
4091
4092        /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4093         * the checksum needs to be fixed.  This bit is an indication that
4094         * the NVM was prepared by OEM software and did not calculate
4095         * the checksum...a likely scenario.
4096         */
4097        switch (hw->mac.type) {
4098        case e1000_pch_lpt:
4099        case e1000_pch_spt:
4100        case e1000_pch_cnp:
4101        case e1000_pch_tgp:
4102        case e1000_pch_adp:
4103        case e1000_pch_mtp:
4104                word = NVM_COMPAT;
4105                valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4106                break;
4107        default:
4108                word = NVM_FUTURE_INIT_WORD1;
4109                valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4110                break;
4111        }
4112
4113        ret_val = e1000_read_nvm(hw, word, 1, &data);
4114        if (ret_val)
4115                return ret_val;
4116
4117        if (!(data & valid_csum_mask)) {
4118                e_dbg("NVM Checksum Invalid\n");
4119
4120                if (hw->mac.type < e1000_pch_cnp) {
4121                        data |= valid_csum_mask;
4122                        ret_val = e1000_write_nvm(hw, word, 1, &data);
4123                        if (ret_val)
4124                                return ret_val;
4125                        ret_val = e1000e_update_nvm_checksum(hw);
4126                        if (ret_val)
4127                                return ret_val;
4128                }
4129        }
4130
4131        return e1000e_validate_nvm_checksum_generic(hw);
4132}
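
/*
 * Illustrative sketch (not part of the driver): the generic validation
 * called above checks that the 16-bit sum of NVM words 0x00-0x3F equals
 * NVM_SUM, with word 0x3F (NVM_CHECKSUM_REG) holding the compensating
 * checksum word.  In outline, and assuming those standard definitions:
 */
#if 0	/* example only, never compiled */
static s32 example_sum_nvm_words(struct e1000_hw *hw)
{
        u16 checksum = 0, data, i;

        for (i = 0; i < NVM_CHECKSUM_REG + 1; i++) {
                if (e1000_read_nvm(hw, i, 1, &data))
                        return -E1000_ERR_NVM;
                checksum += data;	/* 16-bit wrap-around sum */
        }

        return (checksum == (u16)NVM_SUM) ? 0 : -E1000_ERR_NVM;
}
#endif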
4133
4134/**
4135 *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
4136 *  @hw: pointer to the HW structure
4137 *
4138 *  To prevent malicious write/erase of the NVM, set it to be read-only
4139 *  so that the hardware ignores all write/erase cycles of the NVM via
4140 *  the flash control registers.  The shadow-ram copy of the NVM will
4141 *  still be updated, however any updates to this copy will not stick
4142 *  across driver reloads.
4143 **/
4144void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
4145{
4146        struct e1000_nvm_info *nvm = &hw->nvm;
4147        union ich8_flash_protected_range pr0;
4148        union ich8_hws_flash_status hsfsts;
4149        u32 gfpreg;
4150
4151        nvm->ops.acquire(hw);
4152
4153        gfpreg = er32flash(ICH_FLASH_GFPREG);
4154
4155        /* Write-protect GbE Sector of NVM */
4156        pr0.regval = er32flash(ICH_FLASH_PR0);
4157        pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
4158        pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
4159        pr0.range.wpe = true;
4160        ew32flash(ICH_FLASH_PR0, pr0.regval);
4161
4162        /* Lock down a subset of GbE Flash Control Registers, e.g.
4163         * PR0 to prevent the write-protection from being lifted.
4164         * Once FLOCKDN is set, the registers protected by it cannot
4165         * be written until FLOCKDN is cleared by a hardware reset.
4166         */
4167        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4168        hsfsts.hsf_status.flockdn = true;
4169        ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
4170
4171        nvm->ops.release(hw);
4172}
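
/* Editor's note (illustrative only): once FLOCKDN is set above, PR0 is frozen,
 * so a later attempt to lift the write protection such as
 *
 *	pr0.range.wpe = false;
 *	ew32flash(ICH_FLASH_PR0, pr0.regval);
 *
 * is ignored by the hardware until FLOCKDN is cleared by a hardware reset.
 */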
4173
4174/**
4175 *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4176 *  @hw: pointer to the HW structure
4177 *  @offset: The offset (in bytes) of the byte/word to write.
4178 *  @size: Size of data to write, 1=byte 2=word
4179 *  @data: The byte(s) to write to the NVM.
4180 *
4181 *  Writes one/two bytes to the NVM using the flash access registers.
4182 **/
4183static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4184                                          u8 size, u16 data)
4185{
4186        union ich8_hws_flash_status hsfsts;
4187        union ich8_hws_flash_ctrl hsflctl;
4188        u32 flash_linear_addr;
4189        u32 flash_data = 0;
4190        s32 ret_val;
4191        u8 count = 0;
4192
4193        if (hw->mac.type >= e1000_pch_spt) {
4194                if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4195                        return -E1000_ERR_NVM;
4196        } else {
4197                if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4198                        return -E1000_ERR_NVM;
4199        }
4200
4201        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4202                             hw->nvm.flash_base_addr);
4203
4204        do {
4205                udelay(1);
4206                /* Steps */
4207                ret_val = e1000_flash_cycle_init_ich8lan(hw);
4208                if (ret_val)
4209                        break;
4210                /* In SPT, this register is in LAN memory space, not
4211                 * flash.  Therefore, only 32-bit access is supported.
4212                 */
4213                if (hw->mac.type >= e1000_pch_spt)
4214                        hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
4215                else
4216                        hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4217
4218                /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4219                hsflctl.hsf_ctrl.fldbcount = size - 1;
4220                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4221                /* In SPT, this register is in LAN memory space,
4222                 * not flash.  Therefore, only 32-bit access is
4223                 * supported.
4224                 */
4225                if (hw->mac.type >= e1000_pch_spt)
4226                        ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4227                else
4228                        ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4229
4230                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4231
4232                if (size == 1)
4233                        flash_data = (u32)data & 0x00FF;
4234                else
4235                        flash_data = (u32)data;
4236
4237                ew32flash(ICH_FLASH_FDATA0, flash_data);
4238
4239                /* Check if FCERR is set to 1; if so, clear it and try
4240                 * the whole sequence a few more times, else we are done.
4241                 */
4242                ret_val =
4243                    e1000_flash_cycle_ich8lan(hw,
4244                                              ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4245                if (!ret_val)
4246                        break;
4247
4248                /* If we're here, then things are most likely
4249                 * completely hosed, but if the error condition
4250                 * is detected, it won't hurt to give it another
4251                 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4252                 */
4253                hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4254                if (hsfsts.hsf_status.flcerr)
4255                        /* Repeat for some time before giving up. */
4256                        continue;
4257                if (!hsfsts.hsf_status.flcdone) {
4258                        e_dbg("Timeout error - flash cycle did not complete.\n");
4259                        break;
4260                }
4261        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4262
4263        return ret_val;
4264}
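
/* Editor's sketch: on SPT and newer parts the flash registers sit in LAN
 * memory space and only 32-bit accesses are allowed, so the 16-bit HSFCTL
 * image rides in the upper half of the 32-bit HSFSTS dword.  The helpers
 * below are hypothetical and only mirror the shifts used above.
 */
static inline u16 e1000_example_unpack_hsflctl(u32 hsfsts_dword)
{
	/* the flash-control image occupies bits 31:16 */
	return (u16)(hsfsts_dword >> 16);
}

static inline u32 e1000_example_pack_hsflctl(u16 hsflctl)
{
	/* written back as the high half, as in ew32flash(..., regval << 16) above */
	return (u32)hsflctl << 16;
}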
4265
4266/**
4267 *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4268 *  @hw: pointer to the HW structure
4269 *  @offset: The offset (in bytes) of the dword to write.
4270 *  @data: The 4 bytes to write to the NVM.
4271 *
4272 *  Writes one dword (4 bytes) to the NVM using the flash access registers.
4273 **/
4274static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4275                                            u32 data)
4276{
4277        union ich8_hws_flash_status hsfsts;
4278        union ich8_hws_flash_ctrl hsflctl;
4279        u32 flash_linear_addr;
4280        s32 ret_val;
4281        u8 count = 0;
4282
4283        if (hw->mac.type >= e1000_pch_spt) {
4284                if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4285                        return -E1000_ERR_NVM;
4286        }
4287        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4288                             hw->nvm.flash_base_addr);
4289        do {
4290                udelay(1);
4291                /* Steps */
4292                ret_val = e1000_flash_cycle_init_ich8lan(hw);
4293                if (ret_val)
4294                        break;
4295
4296                /* In SPT, this register is in LAN memory space, not
4297                 * flash.  Therefore, only 32-bit access is supported.
4298                 */
4299                if (hw->mac.type >= e1000_pch_spt)
4300                        hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
4301                            >> 16;
4302                else
4303                        hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4304
4305                hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4306                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4307
4308                /* In SPT, this register is in LAN memory space,
4309                 * not flash.  Therefore, only 32-bit access is
4310                 * supported.
4311                 */
4312                if (hw->mac.type >= e1000_pch_spt)
4313                        ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4314                else
4315                        ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4316
4317                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4318
4319                ew32flash(ICH_FLASH_FDATA0, data);
4320
4321                /* Check if FCERR is set to 1; if so, clear it and try
4322                 * the whole sequence a few more times, else we are done.
4323                 */
4324                ret_val =
4325                   e1000_flash_cycle_ich8lan(hw,
4326                                             ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4327
4328                if (!ret_val)
4329                        break;
4330
4331                /* If we're here, then things are most likely
4332                 * completely hosed, but if the error condition
4333                 * is detected, it won't hurt to give it another
4334                 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4335                 */
4336                hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4337
4338                if (hsfsts.hsf_status.flcerr)
4339                        /* Repeat for some time before giving up. */
4340                        continue;
4341                if (!hsfsts.hsf_status.flcdone) {
4342                        e_dbg("Timeout error - flash cycle did not complete.\n");
4343                        break;
4344                }
4345        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4346
4347        return ret_val;
4348}
4349
4350/**
4351 *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4352 *  @hw: pointer to the HW structure
4353 *  @offset: The index of the byte to write.
4354 *  @data: The byte to write to the NVM.
4355 *
4356 *  Writes a single byte to the NVM using the flash access registers.
4357 **/
4358static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4359                                          u8 data)
4360{
4361        u16 word = (u16)data;
4362
4363        return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4364}
4365
4366/**
4367 *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4368 *  @hw: pointer to the HW structure
4369 *  @offset: The offset (in words) at which to write the dword.
4370 *  @dword: The dword to write to the NVM.
4371 *
4372 *  Writes a single dword to the NVM using the flash access registers.
4373 *  Goes through a retry algorithm before giving up.
4374 **/
4375static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4376                                                 u32 offset, u32 dword)
4377{
4378        s32 ret_val;
4379        u16 program_retries;
4380
4381        /* Must convert word offset into bytes. */
4382        offset <<= 1;
4383        ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4384
4385        if (!ret_val)
4386                return ret_val;
4387        for (program_retries = 0; program_retries < 100; program_retries++) {
4388                e_dbg("Retrying DWord %8.8X at offset %u\n", dword, offset);
4389                usleep_range(100, 200);
4390                ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4391                if (!ret_val)
4392                        break;
4393        }
4394        if (program_retries == 100)
4395                return -E1000_ERR_NVM;
4396
4397        return 0;
4398}
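
/* Editor's usage sketch (hypothetical caller, not upstream code): callers pass
 * a dword-aligned *word* offset; the helper converts it to a byte offset
 * itself before programming the flash, e.g.:
 *
 *	for (i = 0; i < nwords; i += 2) {
 *		ret_val = e1000_retry_write_flash_dword_ich8lan(hw, i, dwords[i / 2]);
 *		if (ret_val)
 *			return ret_val;
 *	}
 *
 * where "nwords" and "dwords[]" are illustrative names only.
 */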
4399
4400/**
4401 *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4402 *  @hw: pointer to the HW structure
4403 *  @offset: The offset of the byte to write.
4404 *  @byte: The byte to write to the NVM.
4405 *
4406 *  Writes a single byte to the NVM using the flash access registers.
4407 *  Goes through a retry algorithm before giving up.
4408 **/
4409static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4410                                                u32 offset, u8 byte)
4411{
4412        s32 ret_val;
4413        u16 program_retries;
4414
4415        ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4416        if (!ret_val)
4417                return ret_val;
4418
4419        for (program_retries = 0; program_retries < 100; program_retries++) {
4420                e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
4421                usleep_range(100, 200);
4422                ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4423                if (!ret_val)
4424                        break;
4425        }
4426        if (program_retries == 100)
4427                return -E1000_ERR_NVM;
4428
4429        return 0;
4430}
4431
4432/**
4433 *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4434 *  @hw: pointer to the HW structure
4435 *  @bank: 0 for first bank, 1 for second bank, etc.
4436 *
4437 *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4438 *  bank N is 4096 * N + flash_reg_addr.
4439 **/
4440static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4441{
4442        struct e1000_nvm_info *nvm = &hw->nvm;
4443        union ich8_hws_flash_status hsfsts;
4444        union ich8_hws_flash_ctrl hsflctl;
4445        u32 flash_linear_addr;
4446        /* bank size is in 16bit words - adjust to bytes */
4447        u32 flash_bank_size = nvm->flash_bank_size * 2;
4448        s32 ret_val;
4449        s32 count = 0;
4450        s32 j, iteration, sector_size;
4451
4452        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4453
4454        /* Determine HW Sector size: Read BERASE bits of hw flash status
4455         * register
4456         * 00: The Hw sector is 256 bytes, hence we need to erase 16
4457         *     consecutive sectors.  The start index for the nth Hw sector
4458         *     can be calculated as = bank * 4096 + n * 256
4459         * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4460         *     The start index for the nth Hw sector can be calculated
4461         *     as = bank * 4096
4462         * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4463         *     (ich9 only, otherwise error condition)
4464         * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4465         */
4466        switch (hsfsts.hsf_status.berasesz) {
4467        case 0:
4468                /* Hw sector size 256 */
4469                sector_size = ICH_FLASH_SEG_SIZE_256;
4470                iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4471                break;
4472        case 1:
4473                sector_size = ICH_FLASH_SEG_SIZE_4K;
4474                iteration = 1;
4475                break;
4476        case 2:
4477                sector_size = ICH_FLASH_SEG_SIZE_8K;
4478                iteration = 1;
4479                break;
4480        case 3:
4481                sector_size = ICH_FLASH_SEG_SIZE_64K;
4482                iteration = 1;
4483                break;
4484        default:
4485                return -E1000_ERR_NVM;
4486        }
4487
4488        /* Start with the base address, then add the sector offset. */
4489        flash_linear_addr = hw->nvm.flash_base_addr;
4490        flash_linear_addr += (bank) ? flash_bank_size : 0;
4491
4492        for (j = 0; j < iteration; j++) {
4493                do {
4494                        u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4495
4496                        /* Steps */
4497                        ret_val = e1000_flash_cycle_init_ich8lan(hw);
4498                        if (ret_val)
4499                                return ret_val;
4500
4501                        /* Write a value 11 (block Erase) in Flash
4502                         * Cycle field in hw flash control
4503                         */
4504                        if (hw->mac.type >= e1000_pch_spt)
4505                                hsflctl.regval =
4506                                    er32flash(ICH_FLASH_HSFSTS) >> 16;
4507                        else
4508                                hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4509
4510                        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4511                        if (hw->mac.type >= e1000_pch_spt)
4512                                ew32flash(ICH_FLASH_HSFSTS,
4513                                          hsflctl.regval << 16);
4514                        else
4515                                ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4516
4517                        /* Write the last 24 bits of an index within the
4518                         * block into Flash Linear address field in Flash
4519                         * Address.
4520                         */
4521                        flash_linear_addr += (j * sector_size);
4522                        ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4523
4524                        ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4525                        if (!ret_val)
4526                                break;
4527
4528                        /* Check if FCERR is set to 1.  If so,
4529                         * clear it and try the whole sequence
4530                         * a few more times, else we are done.
4531                         */
4532                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4533                        if (hsfsts.hsf_status.flcerr)
4534                                /* repeat for some time before giving up */
4535                                continue;
4536                        else if (!hsfsts.hsf_status.flcdone)
4537                                return ret_val;
4538                } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4539        }
4540
4541        return 0;
4542}
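
/* Editor's worked example (hedged): with BERASESZ = 00b the hardware sector is
 * 256 bytes, so erasing a 4 KiB bank (nvm->flash_bank_size of 2048 16-bit
 * words) takes iteration = 4096 / 256 = 16 erase cycles, whereas
 * BERASESZ = 01b (4 KiB sectors) clears the same bank in a single cycle.
 * Bank 1 simply begins flash_bank_size bytes after hw->nvm.flash_base_addr.
 */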
4543
4544/**
4545 *  e1000_valid_led_default_ich8lan - Set the default LED settings
4546 *  @hw: pointer to the HW structure
4547 *  @data: Pointer to the LED settings
4548 *
4549 *  Reads the LED default settings from the NVM to data.  If the NVM LED
4550 *  settings are all 0's or F's, set the LED default to a valid LED default
4551 *  setting.
4552 **/
4553static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4554{
4555        s32 ret_val;
4556
4557        ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
4558        if (ret_val) {
4559                e_dbg("NVM Read Error\n");
4560                return ret_val;
4561        }
4562
4563        if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4564                *data = ID_LED_DEFAULT_ICH8LAN;
4565
4566        return 0;
4567}
4568
4569/**
4570 *  e1000_id_led_init_pchlan - store LED configurations
4571 *  @hw: pointer to the HW structure
4572 *
4573 *  PCH does not control LEDs via the LEDCTL register; rather, it uses
4574 *  the PHY LED configuration register.
4575 *
4576 *  PCH also does not have an "always on" or "always off" mode which
4577 *  complicates the ID feature.  Instead of using the "on" mode to indicate
4578 *  in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
4579 *  use "link_up" mode.  The LEDs will still ID on request if there is no
4580 *  link based on logic in e1000_led_[on|off]_pchlan().
4581 **/
4582static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4583{
4584        struct e1000_mac_info *mac = &hw->mac;
4585        s32 ret_val;
4586        const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4587        const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4588        u16 data, i, temp, shift;
4589
4590        /* Get default ID LED modes */
4591        ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4592        if (ret_val)
4593                return ret_val;
4594
4595        mac->ledctl_default = er32(LEDCTL);
4596        mac->ledctl_mode1 = mac->ledctl_default;
4597        mac->ledctl_mode2 = mac->ledctl_default;
4598
4599        for (i = 0; i < 4; i++) {
4600                temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4601                shift = (i * 5);
4602                switch (temp) {
4603                case ID_LED_ON1_DEF2:
4604                case ID_LED_ON1_ON2:
4605                case ID_LED_ON1_OFF2:
4606                        mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4607                        mac->ledctl_mode1 |= (ledctl_on << shift);
4608                        break;
4609                case ID_LED_OFF1_DEF2:
4610                case ID_LED_OFF1_ON2:
4611                case ID_LED_OFF1_OFF2:
4612                        mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4613                        mac->ledctl_mode1 |= (ledctl_off << shift);
4614                        break;
4615                default:
4616                        /* Do nothing */
4617                        break;
4618                }
4619                switch (temp) {
4620                case ID_LED_DEF1_ON2:
4621                case ID_LED_ON1_ON2:
4622                case ID_LED_OFF1_ON2:
4623                        mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4624                        mac->ledctl_mode2 |= (ledctl_on << shift);
4625                        break;
4626                case ID_LED_DEF1_OFF2:
4627                case ID_LED_ON1_OFF2:
4628                case ID_LED_OFF1_OFF2:
4629                        mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4630                        mac->ledctl_mode2 |= (ledctl_off << shift);
4631                        break;
4632                default:
4633                        /* Do nothing */
4634                        break;
4635                }
4636        }
4637
4638        return 0;
4639}
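
/* Editor's worked example (hedged, assuming the usual 4-bit mode mask and
 * 5-bit PHY LED fields): the NVM ID-LED word packs one mode nibble per LED
 * while the PHY LED configuration uses 5 bits per LED, so for i = 1 the loop
 * above reads bits 7:4 of "data" and rewrites bits 9:5 of ledctl_mode1/2:
 *
 *	temp  = (data >> (1 << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
 *	shift = 1 * 5;
 */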
4640
4641/**
4642 *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4643 *  @hw: pointer to the HW structure
4644 *
4645 *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4646 *  register, so the bus width is hard coded.
4647 **/
4648static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4649{
4650        struct e1000_bus_info *bus = &hw->bus;
4651        s32 ret_val;
4652
4653        ret_val = e1000e_get_bus_info_pcie(hw);
4654
4655        /* ICH devices are "PCI Express"-ish.  They have
4656         * a configuration space, but do not contain
4657         * PCI Express Capability registers, so bus width
4658         * must be hardcoded.
4659         */
4660        if (bus->width == e1000_bus_width_unknown)
4661                bus->width = e1000_bus_width_pcie_x1;
4662
4663        return ret_val;
4664}
4665
4666/**
4667 *  e1000_reset_hw_ich8lan - Reset the hardware
4668 *  @hw: pointer to the HW structure
4669 *
4670 *  Does a full reset of the hardware which includes a reset of the PHY and
4671 *  MAC.
4672 **/
4673static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4674{
4675        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4676        u16 kum_cfg;
4677        u32 ctrl, reg;
4678        s32 ret_val;
4679
4680        /* Prevent the PCI-E bus from sticking if there is no TLP connection
4681         * on the last TLP read/write transaction when MAC is reset.
4682         */
4683        ret_val = e1000e_disable_pcie_master(hw);
4684        if (ret_val)
4685                e_dbg("PCI-E Master disable polling has failed.\n");
4686
4687        e_dbg("Masking off all interrupts\n");
4688        ew32(IMC, 0xffffffff);
4689
4690        /* Disable the Transmit and Receive units.  Then delay to allow
4691         * any pending transactions to complete before we hit the MAC
4692         * with the global reset.
4693         */
4694        ew32(RCTL, 0);
4695        ew32(TCTL, E1000_TCTL_PSP);
4696        e1e_flush();
4697
4698        usleep_range(10000, 11000);
4699
4700        /* Workaround for ICH8 bit corruption issue in FIFO memory */
4701        if (hw->mac.type == e1000_ich8lan) {
4702                /* Set Tx and Rx buffer allocation to 8k apiece. */
4703                ew32(PBA, E1000_PBA_8K);
4704                /* Set Packet Buffer Size to 16k. */
4705                ew32(PBS, E1000_PBS_16K);
4706        }
4707
4708        if (hw->mac.type == e1000_pchlan) {
4709                /* Save the NVM K1 bit setting */
4710                ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4711                if (ret_val)
4712                        return ret_val;
4713
4714                if (kum_cfg & E1000_NVM_K1_ENABLE)
4715                        dev_spec->nvm_k1_enabled = true;
4716                else
4717                        dev_spec->nvm_k1_enabled = false;
4718        }
4719
4720        ctrl = er32(CTRL);
4721
4722        if (!hw->phy.ops.check_reset_block(hw)) {
4723                /* Full-chip reset requires MAC and PHY reset at the same
4724                 * time to make sure the interface between MAC and the
4725                 * external PHY is reset.
4726                 */
4727                ctrl |= E1000_CTRL_PHY_RST;
4728
4729                /* Gate automatic PHY configuration by hardware on
4730                 * non-managed 82579
4731                 */
4732                if ((hw->mac.type == e1000_pch2lan) &&
4733                    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
4734                        e1000_gate_hw_phy_config_ich8lan(hw, true);
4735        }
4736        ret_val = e1000_acquire_swflag_ich8lan(hw);
4737        e_dbg("Issuing a global reset to ich8lan\n");
4738        ew32(CTRL, (ctrl | E1000_CTRL_RST));
4739        /* cannot issue a flush here because it hangs the hardware */
4740        msleep(20);
4741
4742        /* Set Phy Config Counter to 50msec */
4743        if (hw->mac.type == e1000_pch2lan) {
4744                reg = er32(FEXTNVM3);
4745                reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4746                reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4747                ew32(FEXTNVM3, reg);
4748        }
4749
4750        if (!ret_val)
4751                clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
4752
4753        if (ctrl & E1000_CTRL_PHY_RST) {
4754                ret_val = hw->phy.ops.get_cfg_done(hw);
4755                if (ret_val)
4756                        return ret_val;
4757
4758                ret_val = e1000_post_phy_reset_ich8lan(hw);
4759                if (ret_val)
4760                        return ret_val;
4761        }
4762
4763        /* For PCH, this write will make sure that any noise
4764         * will be detected as a CRC error and be dropped rather than show up
4765         * as a bad packet to the DMA engine.
4766         */
4767        if (hw->mac.type == e1000_pchlan)
4768                ew32(CRC_OFFSET, 0x65656565);
4769
4770        ew32(IMC, 0xffffffff);
4771        er32(ICR);
4772
4773        reg = er32(KABGTXD);
4774        reg |= E1000_KABGTXD_BGSQLBIAS;
4775        ew32(KABGTXD, reg);
4776
4777        return 0;
4778}
4779
4780/**
4781 *  e1000_init_hw_ich8lan - Initialize the hardware
4782 *  @hw: pointer to the HW structure
4783 *
4784 *  Prepares the hardware for transmit and receive by doing the following:
4785 *   - initialize hardware bits
4786 *   - initialize LED identification
4787 *   - setup receive address registers
4788 *   - setup flow control
4789 *   - setup transmit descriptors
4790 *   - clear statistics
4791 **/
4792static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4793{
4794        struct e1000_mac_info *mac = &hw->mac;
4795        u32 ctrl_ext, txdctl, snoop;
4796        s32 ret_val;
4797        u16 i;
4798
4799        e1000_initialize_hw_bits_ich8lan(hw);
4800
4801        /* Initialize identification LED */
4802        ret_val = mac->ops.id_led_init(hw);
4803        /* An error is not fatal and we should not stop init due to this */
4804        if (ret_val)
4805                e_dbg("Error initializing identification LED\n");
4806
4807        /* Setup the receive address. */
4808        e1000e_init_rx_addrs(hw, mac->rar_entry_count);
4809
4810        /* Zero out the Multicast HASH table */
4811        e_dbg("Zeroing the MTA\n");
4812        for (i = 0; i < mac->mta_reg_count; i++)
4813                E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4814
4815        /* The 82578 Rx buffer will stall if wakeup is enabled in host and
4816         * the ME.  Disable wakeup by clearing the host wakeup bit.
4817         * Reset the phy after disabling host wakeup to reset the Rx buffer.
4818         */
4819        if (hw->phy.type == e1000_phy_82578) {
4820                e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
4821                i &= ~BM_WUC_HOST_WU_BIT;
4822                e1e_wphy(hw, BM_PORT_GEN_CFG, i);
4823                ret_val = e1000_phy_hw_reset_ich8lan(hw);
4824                if (ret_val)
4825                        return ret_val;
4826        }
4827
4828        /* Setup link and flow control */
4829        ret_val = mac->ops.setup_link(hw);
4830
4831        /* Set the transmit descriptor write-back policy for both queues */
4832        txdctl = er32(TXDCTL(0));
4833        txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4834                  E1000_TXDCTL_FULL_TX_DESC_WB);
4835        txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4836                  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4837        ew32(TXDCTL(0), txdctl);
4838        txdctl = er32(TXDCTL(1));
4839        txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4840                  E1000_TXDCTL_FULL_TX_DESC_WB);
4841        txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4842                  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4843        ew32(TXDCTL(1), txdctl);
4844
4845        /* ICH8 has opposite polarity of no_snoop bits.
4846         * By default, we should use snoop behavior.
4847         */
4848        if (mac->type == e1000_ich8lan)
4849                snoop = PCIE_ICH8_SNOOP_ALL;
4850        else
4851                snoop = (u32)~(PCIE_NO_SNOOP_ALL);
4852        e1000e_set_pcie_no_snoop(hw, snoop);
4853
4854        ctrl_ext = er32(CTRL_EXT);
4855        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4856        ew32(CTRL_EXT, ctrl_ext);
4857
4858        /* Clear all of the statistics registers (clear on read).  It is
4859         * important that we do this after we have tried to establish link
4860         * because the symbol error count will increment wildly if there
4861         * is no link.
4862         */
4863        e1000_clear_hw_cntrs_ich8lan(hw);
4864
4865        return ret_val;
4866}
4867
4868/**
4869 *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4870 *  @hw: pointer to the HW structure
4871 *
4872 *  Sets/Clears the hardware bits necessary for correctly setting up the
4873 *  hardware for transmit and receive.
4874 **/
4875static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4876{
4877        u32 reg;
4878
4879        /* Extended Device Control */
4880        reg = er32(CTRL_EXT);
4881        reg |= BIT(22);
4882        /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4883        if (hw->mac.type >= e1000_pchlan)
4884                reg |= E1000_CTRL_EXT_PHYPDEN;
4885        ew32(CTRL_EXT, reg);
4886
4887        /* Transmit Descriptor Control 0 */
4888        reg = er32(TXDCTL(0));
4889        reg |= BIT(22);
4890        ew32(TXDCTL(0), reg);
4891
4892        /* Transmit Descriptor Control 1 */
4893        reg = er32(TXDCTL(1));
4894        reg |= BIT(22);
4895        ew32(TXDCTL(1), reg);
4896
4897        /* Transmit Arbitration Control 0 */
4898        reg = er32(TARC(0));
4899        if (hw->mac.type == e1000_ich8lan)
4900                reg |= BIT(28) | BIT(29);
4901        reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27);
4902        ew32(TARC(0), reg);
4903
4904        /* Transmit Arbitration Control 1 */
4905        reg = er32(TARC(1));
4906        if (er32(TCTL) & E1000_TCTL_MULR)
4907                reg &= ~BIT(28);
4908        else
4909                reg |= BIT(28);
4910        reg |= BIT(24) | BIT(26) | BIT(30);
4911        ew32(TARC(1), reg);
4912
4913        /* Device Status */
4914        if (hw->mac.type == e1000_ich8lan) {
4915                reg = er32(STATUS);
4916                reg &= ~BIT(31);
4917                ew32(STATUS, reg);
4918        }
4919
4920        /* Work around a descriptor data corruption issue during NFSv2 UDP
4921         * traffic by disabling the NFS filtering capability.
4922         */
4923        reg = er32(RFCTL);
4924        reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4925
4926        /* Disable IPv6 extension header parsing because some malformed
4927         * IPv6 headers can hang the Rx.
4928         */
4929        if (hw->mac.type == e1000_ich8lan)
4930                reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4931        ew32(RFCTL, reg);
4932
4933        /* Enable ECC on Lynxpoint */
4934        if (hw->mac.type >= e1000_pch_lpt) {
4935                reg = er32(PBECCSTS);
4936                reg |= E1000_PBECCSTS_ECC_ENABLE;
4937                ew32(PBECCSTS, reg);
4938
4939                reg = er32(CTRL);
4940                reg |= E1000_CTRL_MEHE;
4941                ew32(CTRL, reg);
4942        }
4943}
4944
4945/**
4946 *  e1000_setup_link_ich8lan - Setup flow control and link settings
4947 *  @hw: pointer to the HW structure
4948 *
4949 *  Determines which flow control settings to use, then configures flow
4950 *  control.  Calls the appropriate media-specific link configuration
4951 *  function.  Assuming the adapter has a valid link partner, a valid link
4952 *  should be established.  Assumes the hardware has previously been reset
4953 *  and the transmitter and receiver are not enabled.
4954 **/
4955static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4956{
4957        s32 ret_val;
4958
4959        if (hw->phy.ops.check_reset_block(hw))
4960                return 0;
4961
4962        /* ICH parts do not have a word in the NVM to determine
4963         * the default flow control setting, so we explicitly
4964         * set it to full.
4965         */
4966        if (hw->fc.requested_mode == e1000_fc_default) {
4967                /* Workaround h/w hang when Tx flow control enabled */
4968                if (hw->mac.type == e1000_pchlan)
4969                        hw->fc.requested_mode = e1000_fc_rx_pause;
4970                else
4971                        hw->fc.requested_mode = e1000_fc_full;
4972        }
4973
4974        /* Save off the requested flow control mode for use later.  Depending
4975         * on the link partner's capabilities, we may or may not use this mode.
4976         */
4977        hw->fc.current_mode = hw->fc.requested_mode;
4978
4979        e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
4980
4981        /* Continue to configure the copper link. */
4982        ret_val = hw->mac.ops.setup_physical_interface(hw);
4983        if (ret_val)
4984                return ret_val;
4985
4986        ew32(FCTTV, hw->fc.pause_time);
4987        if ((hw->phy.type == e1000_phy_82578) ||
4988            (hw->phy.type == e1000_phy_82579) ||
4989            (hw->phy.type == e1000_phy_i217) ||
4990            (hw->phy.type == e1000_phy_82577)) {
4991                ew32(FCRTV_PCH, hw->fc.refresh_time);
4992
4993                ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
4994                                   hw->fc.pause_time);
4995                if (ret_val)
4996                        return ret_val;
4997        }
4998
4999        return e1000e_set_fc_watermarks(hw);
5000}
5001
5002/**
5003 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5004 *  @hw: pointer to the HW structure
5005 *
5006 *  Configures the Kumeran interface to the PHY to wait the appropriate time
5007 *  when polling the PHY, then calls the generic setup_copper_link to finish
5008 *  configuring the copper link.
5009 **/
5010static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5011{
5012        u32 ctrl;
5013        s32 ret_val;
5014        u16 reg_data;
5015
5016        ctrl = er32(CTRL);
5017        ctrl |= E1000_CTRL_SLU;
5018        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5019        ew32(CTRL, ctrl);
5020
5021        /* Set the mac to wait the maximum time between each iteration
5022         * and increase the max iterations when polling the phy;
5023         * this fixes erroneous timeouts at 10Mbps.
5024         */
5025        ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
5026        if (ret_val)
5027                return ret_val;
5028        ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
5029                                       &reg_data);
5030        if (ret_val)
5031                return ret_val;
5032        reg_data |= 0x3F;
5033        ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
5034                                        reg_data);
5035        if (ret_val)
5036                return ret_val;
5037
5038        switch (hw->phy.type) {
5039        case e1000_phy_igp_3:
5040                ret_val = e1000e_copper_link_setup_igp(hw);
5041                if (ret_val)
5042                        return ret_val;
5043                break;
5044        case e1000_phy_bm:
5045        case e1000_phy_82578:
5046                ret_val = e1000e_copper_link_setup_m88(hw);
5047                if (ret_val)
5048                        return ret_val;
5049                break;
5050        case e1000_phy_82577:
5051        case e1000_phy_82579:
5052                ret_val = e1000_copper_link_setup_82577(hw);
5053                if (ret_val)
5054                        return ret_val;
5055                break;
5056        case e1000_phy_ife:
5057                ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
5058                if (ret_val)
5059                        return ret_val;
5060
5061                reg_data &= ~IFE_PMC_AUTO_MDIX;
5062
5063                switch (hw->phy.mdix) {
5064                case 1:
5065                        reg_data &= ~IFE_PMC_FORCE_MDIX;
5066                        break;
5067                case 2:
5068                        reg_data |= IFE_PMC_FORCE_MDIX;
5069                        break;
5070                case 0:
5071                default:
5072                        reg_data |= IFE_PMC_AUTO_MDIX;
5073                        break;
5074                }
5075                ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
5076                if (ret_val)
5077                        return ret_val;
5078                break;
5079        default:
5080                break;
5081        }
5082
5083        return e1000e_setup_copper_link(hw);
5084}
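
/* Editor's note (derived from the IFE case above): hw->phy.mdix selects the
 * crossover mode written to IFE_PHY_MDIX_CONTROL: 1 forces MDI, 2 forces
 * MDI-X, and 0 (or any other value) enables auto-MDI/MDI-X.
 */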
5085
5086/**
5087 *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5088 *  @hw: pointer to the HW structure
5089 *
5090 *  Calls the PHY specific link setup function and then calls the
5091 *  generic setup_copper_link to finish configuring the link for
5092 *  Lynxpoint PCH devices
5093 **/
5094static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5095{
5096        u32 ctrl;
5097        s32 ret_val;
5098
5099        ctrl = er32(CTRL);
5100        ctrl |= E1000_CTRL_SLU;
5101        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5102        ew32(CTRL, ctrl);
5103
5104        ret_val = e1000_copper_link_setup_82577(hw);
5105        if (ret_val)
5106                return ret_val;
5107
5108        return e1000e_setup_copper_link(hw);
5109}
5110
5111/**
5112 *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5113 *  @hw: pointer to the HW structure
5114 *  @speed: pointer to store current link speed
5115 *  @duplex: pointer to store the current link duplex
5116 *
5117 *  Calls the generic get_speed_and_duplex to retrieve the current link
5118 *  information and then calls the Kumeran lock loss workaround for links at
5119 *  gigabit speeds.
5120 **/
5121static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5122                                          u16 *duplex)
5123{
5124        s32 ret_val;
5125
5126        ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
5127        if (ret_val)
5128                return ret_val;
5129
5130        if ((hw->mac.type == e1000_ich8lan) &&
5131            (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
5132                ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5133        }
5134
5135        return ret_val;
5136}
5137
5138/**
5139 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5140 *  @hw: pointer to the HW structure
5141 *
5142 *  Work-around for 82566 Kumeran PCS lock loss:
5143 *  On a link status change (i.e. PCI reset, speed change), when link is up and
5144 *  the speed is gigabit:
5145 *    0) if workaround is optionally disabled do nothing
5146 *    1) wait 1ms for Kumeran link to come up
5147 *    2) check Kumeran Diagnostic register PCS lock loss bit
5148 *    3) if not set the link is locked (all is good), otherwise...
5149 *    4) reset the PHY
5150 *    5) repeat up to 10 times
5151 *  Note: this is only called for IGP3 copper when speed is 1gb.
5152 **/
5153static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5154{
5155        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5156        u32 phy_ctrl;
5157        s32 ret_val;
5158        u16 i, data;
5159        bool link;
5160
5161        if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5162                return 0;
5163
5164        /* Make sure link is up before proceeding.  If not, just return.
5165         * Attempting this while the link is negotiating fouls up link
5166         * stability.
5167         */
5168        ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
5169        if (!link)
5170                return 0;
5171
5172        for (i = 0; i < 10; i++) {
5173                /* read once to clear */
5174                ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5175                if (ret_val)
5176                        return ret_val;
5177                /* and again to get new status */
5178                ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5179                if (ret_val)
5180                        return ret_val;
5181
5182                /* check for PCS lock */
5183                if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5184                        return 0;
5185
5186                /* Issue PHY reset */
5187                e1000_phy_hw_reset(hw);
5188                mdelay(5);
5189        }
5190        /* Disable GigE link negotiation */
5191        phy_ctrl = er32(PHY_CTRL);
5192        phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5193                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5194        ew32(PHY_CTRL, phy_ctrl);
5195
5196        /* Call gig speed drop workaround on Gig disable before accessing
5197         * any PHY registers
5198         */
5199        e1000e_gig_downshift_workaround_ich8lan(hw);
5200
5201        /* unable to acquire PCS lock */
5202        return -E1000_ERR_PHY;
5203}
5204
5205/**
5206 *  e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5207 *  @hw: pointer to the HW structure
5208 *  @state: boolean value used to set the current Kumeran workaround state
5209 *
5210 *  If ICH8, set the current Kumeran workaround state (enabled = true,
5211 *  disabled = false).
5212 **/
5213void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5214                                                  bool state)
5215{
5216        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5217
5218        if (hw->mac.type != e1000_ich8lan) {
5219                e_dbg("Workaround applies to ICH8 only.\n");
5220                return;
5221        }
5222
5223        dev_spec->kmrn_lock_loss_workaround_enabled = state;
5224}
5225
5226/**
5227 *  e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5228 *  @hw: pointer to the HW structure
5229 *
5230 *  Workaround for 82566 power-down on D3 entry:
5231 *    1) disable gigabit link
5232 *    2) write VR power-down enable
5233 *    3) read it back
5234 *  Continue if successful, else issue LCD reset and repeat
5235 **/
5236void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5237{
5238        u32 reg;
5239        u16 data;
5240        u8 retry = 0;
5241
5242        if (hw->phy.type != e1000_phy_igp_3)
5243                return;
5244
5245        /* Try the workaround twice (if needed) */
5246        do {
5247                /* Disable link */
5248                reg = er32(PHY_CTRL);
5249                reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5250                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5251                ew32(PHY_CTRL, reg);
5252
5253                /* Call gig speed drop workaround on Gig disable before
5254                 * accessing any PHY registers
5255                 */
5256                if (hw->mac.type == e1000_ich8lan)
5257                        e1000e_gig_downshift_workaround_ich8lan(hw);
5258
5259                /* Write VR power-down enable */
5260                e1e_rphy(hw, IGP3_VR_CTRL, &data);
5261                data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5262                e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5263
5264                /* Read it back and test */
5265                e1e_rphy(hw, IGP3_VR_CTRL, &data);
5266                data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5267                if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5268                        break;
5269
5270                /* Issue PHY reset and repeat at most one more time */
5271                reg = er32(CTRL);
5272                ew32(CTRL, reg | E1000_CTRL_PHY_RST);
5273                retry++;
5274        } while (retry);
5275}
5276
5277/**
5278 *  e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5279 *  @hw: pointer to the HW structure
5280 *
5281 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
5282 *  LPLU, Gig disable, MDIC PHY reset):
5283 *    1) Set Kumeran Near-end loopback
5284 *    2) Clear Kumeran Near-end loopback
5285 *  Should only be called for ICH8[m] devices with any 1G Phy.
5286 **/
5287void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5288{
5289        s32 ret_val;
5290        u16 reg_data;
5291
5292        if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
5293                return;
5294
5295        ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5296                                       &reg_data);
5297        if (ret_val)
5298                return;
5299        reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5300        ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5301                                        reg_data);
5302        if (ret_val)
5303                return;
5304        reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5305        e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
5306}
5307
5308/**
5309 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5310 *  @hw: pointer to the HW structure
5311 *
5312 *  During S0 to Sx transition, it is possible the link remains at gig
5313 *  instead of negotiating to a lower speed.  Before going to Sx, set
5314 *  'Gig Disable' to force link speed negotiation to a lower speed based on
5315 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5316 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5317 *  needs to be written.
5318 *  Parts that support (and are linked to a partner which supports) EEE in
5319 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5320 *  than 10Mbps w/o EEE.
5321 **/
5322void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5323{
5324        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5325        u32 phy_ctrl;
5326        s32 ret_val;
5327
5328        phy_ctrl = er32(PHY_CTRL);
5329        phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5330
5331        if (hw->phy.type == e1000_phy_i217) {
5332                u16 phy_reg, device_id = hw->adapter->pdev->device;
5333
5334                if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5335                    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5336                    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5337                    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5338                    (hw->mac.type >= e1000_pch_spt)) {
5339                        u32 fextnvm6 = er32(FEXTNVM6);
5340
5341                        ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5342                }
5343
5344                ret_val = hw->phy.ops.acquire(hw);
5345                if (ret_val)
5346                        goto out;
5347
5348                if (!dev_spec->eee_disable) {
5349                        u16 eee_advert;
5350
5351                        ret_val =
5352                            e1000_read_emi_reg_locked(hw,
5353                                                      I217_EEE_ADVERTISEMENT,
5354                                                      &eee_advert);
5355                        if (ret_val)
5356                                goto release;
5357
5358                        /* Disable LPLU if both link partners support 100BaseT
5359                         * EEE and 100Full is advertised on both ends of the
5360                         * link, and enable Auto Enable LPI since there will
5361                         * be no driver to enable LPI while in Sx.
5362                         */
5363                        if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5364                            (dev_spec->eee_lp_ability &
5365                             I82579_EEE_100_SUPPORTED) &&
5366                            (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5367                                phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5368                                              E1000_PHY_CTRL_NOND0A_LPLU);
5369
5370                                /* Set Auto Enable LPI after link up */
5371                                e1e_rphy_locked(hw,
5372                                                I217_LPI_GPIO_CTRL, &phy_reg);
5373                                phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5374                                e1e_wphy_locked(hw,
5375                                                I217_LPI_GPIO_CTRL, phy_reg);
5376                        }
5377                }
5378
5379                /* For i217 Intel Rapid Start Technology support,
5380                 * when the system is going into Sx and no manageability engine
5381                 * is present, the driver must configure proxy to reset only on
5382                 * power good.  LPI (Low Power Idle) state must also reset only
5383                 * on power good, as well as the MTA (Multicast table array).
5384                 * The SMBus release must also be disabled on LCD reset.
5385                 */
5386                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5387                        /* Enable proxy to reset only on power good. */
5388                        e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
5389                        phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5390                        e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
5391
5392                        /* Set bit enable LPI (EEE) to reset only on
5393                         * power good.
5394                         */
5395                        e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
5396                        phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5397                        e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
5398
5399                        /* Disable the SMB release on LCD reset. */
5400                        e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5401                        phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5402                        e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5403                }
5404
5405                /* Enable MTA to reset for Intel Rapid Start Technology
5406                 * Support
5407                 */
5408                e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5409                phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5410                e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5411
5412release:
5413                hw->phy.ops.release(hw);
5414        }
5415out:
5416        ew32(PHY_CTRL, phy_ctrl);
5417
5418        if (hw->mac.type == e1000_ich8lan)
5419                e1000e_gig_downshift_workaround_ich8lan(hw);
5420
5421        if (hw->mac.type >= e1000_pchlan) {
5422                e1000_oem_bits_config_ich8lan(hw, false);
5423
5424                /* Reset PHY to activate OEM bits on 82577/8 */
5425                if (hw->mac.type == e1000_pchlan)
5426                        e1000e_phy_hw_reset_generic(hw);
5427
5428                ret_val = hw->phy.ops.acquire(hw);
5429                if (ret_val)
5430                        return;
5431                e1000_write_smbus_addr(hw);
5432                hw->phy.ops.release(hw);
5433        }
5434}
5435
5436/**
5437 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5438 *  @hw: pointer to the HW structure
5439 *
5440 *  During Sx to S0 transitions on non-managed devices or managed devices
5441 *  on which PHY resets are not blocked, if the PHY registers cannot be
5442 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5443 *  the PHY.
5444 *  On i217, setup Intel Rapid Start Technology.
5445 **/
5446void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5447{
5448        s32 ret_val;
5449
5450        if (hw->mac.type < e1000_pch2lan)
5451                return;
5452
5453        ret_val = e1000_init_phy_workarounds_pchlan(hw);
5454        if (ret_val) {
5455                e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
5456                return;
5457        }
5458
5459        /* For i217 Intel Rapid Start Technology support, when the system
5460         * is transitioning from Sx and no manageability engine is present,
5461         * configure SMBus to restore on reset, disable proxy, and enable
5462         * the reset on MTA (Multicast table array).
5463         */
5464        if (hw->phy.type == e1000_phy_i217) {
5465                u16 phy_reg;
5466
5467                ret_val = hw->phy.ops.acquire(hw);
5468                if (ret_val) {
5469                        e_dbg("Failed to setup iRST\n");
5470                        return;
5471                }
5472
5473                /* Clear Auto Enable LPI after link up */
5474                e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5475                phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5476                e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5477
5478                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5479                        /* Restore clear on SMB if no manageability engine
5480                         * is present
5481                         */
5482                        ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5483                        if (ret_val)
5484                                goto release;
5485                        phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5486                        e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5487
5488                        /* Disable Proxy */
5489                        e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
5490                }
5491                /* Enable reset on MTA */
5492                ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5493                if (ret_val)
5494                        goto release;
5495                phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5496                e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5497release:
5498                if (ret_val)
5499                        e_dbg("Error %d in resume workarounds\n", ret_val);
5500                hw->phy.ops.release(hw);
5501        }
5502}
5503
5504/**
5505 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5506 *  @hw: pointer to the HW structure
5507 *
5508 *  Return the LED back to the default configuration.
5509 **/
5510static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5511{
5512        if (hw->phy.type == e1000_phy_ife)
5513                return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
5514
5515        ew32(LEDCTL, hw->mac.ledctl_default);
5516        return 0;
5517}
5518
5519/**
5520 *  e1000_led_on_ich8lan - Turn LEDs on
5521 *  @hw: pointer to the HW structure
5522 *
5523 *  Turn on the LEDs.
5524 **/
5525static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5526{
5527        if (hw->phy.type == e1000_phy_ife)
5528                return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5529                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5530
5531        ew32(LEDCTL, hw->mac.ledctl_mode2);
5532        return 0;
5533}
5534
5535/**
5536 *  e1000_led_off_ich8lan - Turn LEDs off
5537 *  @hw: pointer to the HW structure
5538 *
5539 *  Turn off the LEDs.
5540 **/
5541static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5542{
5543        if (hw->phy.type == e1000_phy_ife)
5544                return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5545                                (IFE_PSCL_PROBE_MODE |
5546                                 IFE_PSCL_PROBE_LEDS_OFF));
5547
5548        ew32(LEDCTL, hw->mac.ledctl_mode1);
5549        return 0;
5550}
5551
5552/**
5553 *  e1000_setup_led_pchlan - Configures SW controllable LED
5554 *  @hw: pointer to the HW structure
5555 *
5556 *  This prepares the SW controllable LED for use.
5557 **/
5558static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5559{
5560        return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
5561}
5562
5563/**
5564 *  e1000_cleanup_led_pchlan - Restore the default LED operation
5565 *  @hw: pointer to the HW structure
5566 *
5567 *  Return the LED back to the default configuration.
5568 **/
5569static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5570{
5571        return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
5572}
5573
5574/**
5575 *  e1000_led_on_pchlan - Turn LEDs on
5576 *  @hw: pointer to the HW structure
5577 *
5578 *  Turn on the LEDs.
5579 **/
5580static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5581{
5582        u16 data = (u16)hw->mac.ledctl_mode2;
5583        u32 i, led;
5584
5585        /* If no link, then turn LED on by setting the invert bit
5586         * for each LED whose mode is "link_up" in ledctl_mode2.
5587         */
5588        if (!(er32(STATUS) & E1000_STATUS_LU)) {
5589                for (i = 0; i < 3; i++) {
5590                        led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5591                        if ((led & E1000_PHY_LED0_MODE_MASK) !=
5592                            E1000_LEDCTL_MODE_LINK_UP)
5593                                continue;
5594                        if (led & E1000_PHY_LED0_IVRT)
5595                                data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5596                        else
5597                                data |= (E1000_PHY_LED0_IVRT << (i * 5));
5598                }
5599        }
5600
5601        return e1e_wphy(hw, HV_LED_CONFIG, data);
5602}
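
/* Illustrative sketch (not part of the driver): the loops in
 * e1000_led_on_pchlan() above and e1000_led_off_pchlan() below treat the
 * 16-bit HV_LED_CONFIG value as three 5-bit LED fields, i.e. LED i occupies
 * bits [i*5 + 4 : i*5], with the mode bits selected by
 * E1000_PHY_LED0_MODE_MASK and the invert bit by E1000_PHY_LED0_IVRT.
 * A minimal, hypothetical helper that flips the invert bit of one LED field
 * would look like this:
 */
#if 0	/* example only, not compiled */
static u16 e1000_example_toggle_led_invert(u16 ledctl, unsigned int led_idx)
{
	/* assumes led_idx < 3, matching the loops in led_on/led_off */
	return ledctl ^ (E1000_PHY_LED0_IVRT << (led_idx * 5));
}
#endif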
5603
5604/**
5605 *  e1000_led_off_pchlan - Turn LEDs off
5606 *  @hw: pointer to the HW structure
5607 *
5608 *  Turn off the LEDs.
5609 **/
5610static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5611{
5612        u16 data = (u16)hw->mac.ledctl_mode1;
5613        u32 i, led;
5614
5615        /* If no link, then turn LED off by clearing the invert bit
5616         * for each LED whose mode is "link_up" in ledctl_mode1.
5617         */
5618        if (!(er32(STATUS) & E1000_STATUS_LU)) {
5619                for (i = 0; i < 3; i++) {
5620                        led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5621                        if ((led & E1000_PHY_LED0_MODE_MASK) !=
5622                            E1000_LEDCTL_MODE_LINK_UP)
5623                                continue;
5624                        if (led & E1000_PHY_LED0_IVRT)
5625                                data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5626                        else
5627                                data |= (E1000_PHY_LED0_IVRT << (i * 5));
5628                }
5629        }
5630
5631        return e1e_wphy(hw, HV_LED_CONFIG, data);
5632}
5633
5634/**
5635 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5636 *  @hw: pointer to the HW structure
5637 *
5638 *  Read the appropriate register for the config done bit to get completion
5639 *  status, and configure the PHY through s/w for EEPROM-less parts.
5640 *
5641 *  NOTE: some EEPROM-less silicon will fail trying to read the config done
5642 *  bit; in that case only an error is logged and execution continues.  If we
5643 *  were to return an error, EEPROM-less silicon could not be reset or change
5644 *  link.
5645 **/
5646static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5647{
5648        s32 ret_val = 0;
5649        u32 bank = 0;
5650        u32 status;
5651
5652        e1000e_get_cfg_done_generic(hw);
5653
5654        /* Wait for indication from h/w that it has completed basic config */
5655        if (hw->mac.type >= e1000_ich10lan) {
5656                e1000_lan_init_done_ich8lan(hw);
5657        } else {
5658                ret_val = e1000e_get_auto_rd_done(hw);
5659                if (ret_val) {
5660                        /* When auto config read does not complete, do not
5661                         * return with an error. This can happen in situations
5662                         * where there is no eeprom and prevents getting link.
5663                         */
5664                        e_dbg("Auto Read Done did not complete\n");
5665                        ret_val = 0;
5666                }
5667        }
5668
5669        /* Clear PHY Reset Asserted bit */
5670        status = er32(STATUS);
5671        if (status & E1000_STATUS_PHYRA)
5672                ew32(STATUS, status & ~E1000_STATUS_PHYRA);
5673        else
5674                e_dbg("PHY Reset Asserted not set - needs delay\n");
5675
5676        /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5677        if (hw->mac.type <= e1000_ich9lan) {
5678                if (!(er32(EECD) & E1000_EECD_PRES) &&
5679                    (hw->phy.type == e1000_phy_igp_3)) {
5680                        e1000e_phy_init_script_igp3(hw);
5681                }
5682        } else {
5683                if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5684                        /* Maybe we should do a basic PHY config */
5685                        e_dbg("EEPROM not present\n");
5686                        ret_val = -E1000_ERR_CONFIG;
5687                }
5688        }
5689
5690        return ret_val;
5691}
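
/* Illustrative sketch (hypothetical, for explanation only): callers reach
 * this routine through the PHY ops table (it is installed as .get_cfg_done
 * in ich8_phy_ops below).  Per the NOTE above, an auto-read-done timeout on
 * EEPROM-less parts is only logged, so a caller should not treat that case
 * as fatal for link bring-up:
 */
#if 0	/* example only, not compiled */
static void e1000_example_wait_cfg_done(struct e1000_hw *hw)
{
	s32 ret_val = hw->phy.ops.get_cfg_done(hw);

	if (ret_val)
		e_dbg("config done not indicated, continuing anyway\n");
}
#endif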
5692
5693/**
5694 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5695 * @hw: pointer to the HW structure
5696 *
5697 * Remove the link when the PHY is powered down to save power, when link is
5698 * turned off during a driver unload, or when wake on LAN is not enabled.
5699 **/
5700static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5701{
5702        /* If neither manageability nor a PHY reset block is active, power down */
5703        if (!(hw->mac.ops.check_mng_mode(hw) ||
5704              hw->phy.ops.check_reset_block(hw)))
5705                e1000_power_down_phy_copper(hw);
5706}
5707
5708/**
5709 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5710 *  @hw: pointer to the HW structure
5711 *
5712 *  Clears hardware counters specific to the silicon family and calls
5713 *  clear_hw_cntrs_generic to clear all general purpose counters.
5714 **/
5715static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5716{
5717        u16 phy_data;
5718        s32 ret_val;
5719
5720        e1000e_clear_hw_cntrs_base(hw);
5721
5722        er32(ALGNERRC);
5723        er32(RXERRC);
5724        er32(TNCRS);
5725        er32(CEXTERR);
5726        er32(TSCTC);
5727        er32(TSCTFC);
5728
5729        er32(MGTPRC);
5730        er32(MGTPDC);
5731        er32(MGTPTC);
5732
5733        er32(IAC);
5734        er32(ICRXOC);
5735
5736        /* Clear PHY statistics registers */
5737        if ((hw->phy.type == e1000_phy_82578) ||
5738            (hw->phy.type == e1000_phy_82579) ||
5739            (hw->phy.type == e1000_phy_i217) ||
5740            (hw->phy.type == e1000_phy_82577)) {
5741                ret_val = hw->phy.ops.acquire(hw);
5742                if (ret_val)
5743                        return;
5744                ret_val = hw->phy.ops.set_page(hw,
5745                                               HV_STATS_PAGE << IGP_PAGE_SHIFT);
5746                if (ret_val)
5747                        goto release;
5748                hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5749                hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5750                hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5751                hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5752                hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5753                hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5754                hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5755                hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5756                hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5757                hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5758                hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5759                hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5760                hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5761                hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5762release:
5763                hw->phy.ops.release(hw);
5764        }
5765}
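
/* Illustrative sketch (hypothetical, for explanation only): the block above
 * clears the PHY statistics registers simply by reading them (they are
 * read-to-clear), which is why phy_data is discarded.  A caller that wanted
 * the value of one counter could reuse the same set_page()/read_reg_page()
 * pattern; the combination below assumes the UPPER register holds the high
 * 16 bits of the counter.
 */
#if 0	/* example only, not compiled */
static s32 e1000_example_read_scc(struct e1000_hw *hw, u32 *scc)
{
	u16 lower, upper;
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;
	ret_val = hw->phy.ops.set_page(hw, HV_STATS_PAGE << IGP_PAGE_SHIFT);
	if (!ret_val) {
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &upper);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &lower);
		*scc = ((u32)upper << 16) | lower;	/* assumption, see above */
	}
	hw->phy.ops.release(hw);
	return ret_val;
}
#endif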
5766
5767static const struct e1000_mac_operations ich8_mac_ops = {
5768        /* check_mng_mode dependent on mac type */
5769        .check_for_link         = e1000_check_for_copper_link_ich8lan,
5770        /* cleanup_led dependent on mac type */
5771        .clear_hw_cntrs         = e1000_clear_hw_cntrs_ich8lan,
5772        .get_bus_info           = e1000_get_bus_info_ich8lan,
5773        .set_lan_id             = e1000_set_lan_id_single_port,
5774        .get_link_up_info       = e1000_get_link_up_info_ich8lan,
5775        /* led_on dependent on mac type */
5776        /* led_off dependent on mac type */
5777        .update_mc_addr_list    = e1000e_update_mc_addr_list_generic,
5778        .reset_hw               = e1000_reset_hw_ich8lan,
5779        .init_hw                = e1000_init_hw_ich8lan,
5780        .setup_link             = e1000_setup_link_ich8lan,
5781        .setup_physical_interface = e1000_setup_copper_link_ich8lan,
5782        /* id_led_init dependent on mac type */
5783        .config_collision_dist  = e1000e_config_collision_dist_generic,
5784        .rar_set                = e1000e_rar_set_generic,
5785        .rar_get_count          = e1000e_rar_get_count_generic,
5786};
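
/* Illustrative sketch (hypothetical, for explanation only): the members
 * marked "dependent on mac type" above are filled in at probe time; in this
 * driver that happens in e1000_get_variants_ich8lan(), which is referenced
 * by the e1000_info structures below.  The pattern is a per-family
 * assignment into the copied ops table, roughly:
 */
#if 0	/* example only, not compiled */
static void e1000_example_fixup_led_ops(struct e1000_hw *hw)
{
	if (hw->mac.type >= e1000_pchlan) {
		hw->mac.ops.cleanup_led = e1000_cleanup_led_pchlan;
		hw->mac.ops.led_on = e1000_led_on_pchlan;
		hw->mac.ops.led_off = e1000_led_off_pchlan;
	} else {
		hw->mac.ops.cleanup_led = e1000_cleanup_led_ich8lan;
		hw->mac.ops.led_on = e1000_led_on_ich8lan;
		hw->mac.ops.led_off = e1000_led_off_ich8lan;
	}
}
#endif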
5787
5788static const struct e1000_phy_operations ich8_phy_ops = {
5789        .acquire                = e1000_acquire_swflag_ich8lan,
5790        .check_reset_block      = e1000_check_reset_block_ich8lan,
5791        .commit                 = NULL,
5792        .get_cfg_done           = e1000_get_cfg_done_ich8lan,
5793        .get_cable_length       = e1000e_get_cable_length_igp_2,
5794        .read_reg               = e1000e_read_phy_reg_igp,
5795        .release                = e1000_release_swflag_ich8lan,
5796        .reset                  = e1000_phy_hw_reset_ich8lan,
5797        .set_d0_lplu_state      = e1000_set_d0_lplu_state_ich8lan,
5798        .set_d3_lplu_state      = e1000_set_d3_lplu_state_ich8lan,
5799        .write_reg              = e1000e_write_phy_reg_igp,
5800};
5801
5802static const struct e1000_nvm_operations ich8_nvm_ops = {
5803        .acquire                = e1000_acquire_nvm_ich8lan,
5804        .read                   = e1000_read_nvm_ich8lan,
5805        .release                = e1000_release_nvm_ich8lan,
5806        .reload                 = e1000e_reload_nvm_generic,
5807        .update                 = e1000_update_nvm_checksum_ich8lan,
5808        .valid_led_default      = e1000_valid_led_default_ich8lan,
5809        .validate               = e1000_validate_nvm_checksum_ich8lan,
5810        .write                  = e1000_write_nvm_ich8lan,
5811};
5812
5813static const struct e1000_nvm_operations spt_nvm_ops = {
5814        .acquire                = e1000_acquire_nvm_ich8lan,
5815        .release                = e1000_release_nvm_ich8lan,
5816        .read                   = e1000_read_nvm_spt,
5817        .update                 = e1000_update_nvm_checksum_spt,
5818        .reload                 = e1000e_reload_nvm_generic,
5819        .valid_led_default      = e1000_valid_led_default_ich8lan,
5820        .validate               = e1000_validate_nvm_checksum_ich8lan,
5821        .write                  = e1000_write_nvm_ich8lan,
5822};
5823
5824const struct e1000_info e1000_ich8_info = {
5825        .mac                    = e1000_ich8lan,
5826        .flags                  = FLAG_HAS_WOL
5827                                  | FLAG_IS_ICH
5828                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5829                                  | FLAG_HAS_AMT
5830                                  | FLAG_HAS_FLASH
5831                                  | FLAG_APME_IN_WUC,
5832        .pba                    = 8,
5833        .max_hw_frame_size      = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
5834        .get_variants           = e1000_get_variants_ich8lan,
5835        .mac_ops                = &ich8_mac_ops,
5836        .phy_ops                = &ich8_phy_ops,
5837        .nvm_ops                = &ich8_nvm_ops,
5838};
5839
5840const struct e1000_info e1000_ich9_info = {
5841        .mac                    = e1000_ich9lan,
5842        .flags                  = FLAG_HAS_JUMBO_FRAMES
5843                                  | FLAG_IS_ICH
5844                                  | FLAG_HAS_WOL
5845                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5846                                  | FLAG_HAS_AMT
5847                                  | FLAG_HAS_FLASH
5848                                  | FLAG_APME_IN_WUC,
5849        .pba                    = 18,
5850        .max_hw_frame_size      = DEFAULT_JUMBO,
5851        .get_variants           = e1000_get_variants_ich8lan,
5852        .mac_ops                = &ich8_mac_ops,
5853        .phy_ops                = &ich8_phy_ops,
5854        .nvm_ops                = &ich8_nvm_ops,
5855};
5856
5857const struct e1000_info e1000_ich10_info = {
5858        .mac                    = e1000_ich10lan,
5859        .flags                  = FLAG_HAS_JUMBO_FRAMES
5860                                  | FLAG_IS_ICH
5861                                  | FLAG_HAS_WOL
5862                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5863                                  | FLAG_HAS_AMT
5864                                  | FLAG_HAS_FLASH
5865                                  | FLAG_APME_IN_WUC,
5866        .pba                    = 18,
5867        .max_hw_frame_size      = DEFAULT_JUMBO,
5868        .get_variants           = e1000_get_variants_ich8lan,
5869        .mac_ops                = &ich8_mac_ops,
5870        .phy_ops                = &ich8_phy_ops,
5871        .nvm_ops                = &ich8_nvm_ops,
5872};
5873
5874const struct e1000_info e1000_pch_info = {
5875        .mac                    = e1000_pchlan,
5876        .flags                  = FLAG_IS_ICH
5877                                  | FLAG_HAS_WOL
5878                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5879                                  | FLAG_HAS_AMT
5880                                  | FLAG_HAS_FLASH
5881                                  | FLAG_HAS_JUMBO_FRAMES
5882                                  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
5883                                  | FLAG_APME_IN_WUC,
5884        .flags2                 = FLAG2_HAS_PHY_STATS,
5885        .pba                    = 26,
5886        .max_hw_frame_size      = 4096,
5887        .get_variants           = e1000_get_variants_ich8lan,
5888        .mac_ops                = &ich8_mac_ops,
5889        .phy_ops                = &ich8_phy_ops,
5890        .nvm_ops                = &ich8_nvm_ops,
5891};
5892
5893const struct e1000_info e1000_pch2_info = {
5894        .mac                    = e1000_pch2lan,
5895        .flags                  = FLAG_IS_ICH
5896                                  | FLAG_HAS_WOL
5897                                  | FLAG_HAS_HW_TIMESTAMP
5898                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5899                                  | FLAG_HAS_AMT
5900                                  | FLAG_HAS_FLASH
5901                                  | FLAG_HAS_JUMBO_FRAMES
5902                                  | FLAG_APME_IN_WUC,
5903        .flags2                 = FLAG2_HAS_PHY_STATS
5904                                  | FLAG2_HAS_EEE
5905                                  | FLAG2_CHECK_SYSTIM_OVERFLOW,
5906        .pba                    = 26,
5907        .max_hw_frame_size      = 9022,
5908        .get_variants           = e1000_get_variants_ich8lan,
5909        .mac_ops                = &ich8_mac_ops,
5910        .phy_ops                = &ich8_phy_ops,
5911        .nvm_ops                = &ich8_nvm_ops,
5912};
5913
5914const struct e1000_info e1000_pch_lpt_info = {
5915        .mac                    = e1000_pch_lpt,
5916        .flags                  = FLAG_IS_ICH
5917                                  | FLAG_HAS_WOL
5918                                  | FLAG_HAS_HW_TIMESTAMP
5919                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5920                                  | FLAG_HAS_AMT
5921                                  | FLAG_HAS_FLASH
5922                                  | FLAG_HAS_JUMBO_FRAMES
5923                                  | FLAG_APME_IN_WUC,
5924        .flags2                 = FLAG2_HAS_PHY_STATS
5925                                  | FLAG2_HAS_EEE
5926                                  | FLAG2_CHECK_SYSTIM_OVERFLOW,
5927        .pba                    = 26,
5928        .max_hw_frame_size      = 9022,
5929        .get_variants           = e1000_get_variants_ich8lan,
5930        .mac_ops                = &ich8_mac_ops,
5931        .phy_ops                = &ich8_phy_ops,
5932        .nvm_ops                = &ich8_nvm_ops,
5933};
5934
5935const struct e1000_info e1000_pch_spt_info = {
5936        .mac                    = e1000_pch_spt,
5937        .flags                  = FLAG_IS_ICH
5938                                  | FLAG_HAS_WOL
5939                                  | FLAG_HAS_HW_TIMESTAMP
5940                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5941                                  | FLAG_HAS_AMT
5942                                  | FLAG_HAS_FLASH
5943                                  | FLAG_HAS_JUMBO_FRAMES
5944                                  | FLAG_APME_IN_WUC,
5945        .flags2                 = FLAG2_HAS_PHY_STATS
5946                                  | FLAG2_HAS_EEE,
5947        .pba                    = 26,
5948        .max_hw_frame_size      = 9022,
5949        .get_variants           = e1000_get_variants_ich8lan,
5950        .mac_ops                = &ich8_mac_ops,
5951        .phy_ops                = &ich8_phy_ops,
5952        .nvm_ops                = &spt_nvm_ops,
5953};
5954
5955const struct e1000_info e1000_pch_cnp_info = {
5956        .mac                    = e1000_pch_cnp,
5957        .flags                  = FLAG_IS_ICH
5958                                  | FLAG_HAS_WOL
5959                                  | FLAG_HAS_HW_TIMESTAMP
5960                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5961                                  | FLAG_HAS_AMT
5962                                  | FLAG_HAS_FLASH
5963                                  | FLAG_HAS_JUMBO_FRAMES
5964                                  | FLAG_APME_IN_WUC,
5965        .flags2                 = FLAG2_HAS_PHY_STATS
5966                                  | FLAG2_HAS_EEE,
5967        .pba                    = 26,
5968        .max_hw_frame_size      = 9022,
5969        .get_variants           = e1000_get_variants_ich8lan,
5970        .mac_ops                = &ich8_mac_ops,
5971        .phy_ops                = &ich8_phy_ops,
5972        .nvm_ops                = &spt_nvm_ops,
5973};
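
/* Illustrative sketch (hypothetical names, for explanation only): the
 * exported e1000_info structures above are consumed by the PCI probe path
 * elsewhere in the driver, which maps a matched device to one of them and
 * then calls .get_variants to finish per-family setup.  A simplified
 * version of that lookup table pattern:
 */
#if 0	/* example only, not compiled */
static const struct e1000_info *example_board_info[] = {
	&e1000_ich8_info,
	&e1000_ich9_info,
	&e1000_ich10_info,
	&e1000_pch_info,
	&e1000_pch2_info,
	&e1000_pch_lpt_info,
	&e1000_pch_spt_info,
	&e1000_pch_cnp_info,
};
#endif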
5974