linux/drivers/net/ethernet/intel/e1000e/ich8lan.c
   1/* Intel PRO/1000 Linux driver
   2 * Copyright(c) 1999 - 2015 Intel Corporation.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms and conditions of the GNU General Public License,
   6 * version 2, as published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope it will be useful, but WITHOUT
   9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  11 * more details.
  12 *
  13 * The full GNU General Public License is included in this distribution in
  14 * the file called "COPYING".
  15 *
  16 * Contact Information:
  17 * Linux NICS <linux.nics@intel.com>
  18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  20 */
  21
  22/* 82562G 10/100 Network Connection
  23 * 82562G-2 10/100 Network Connection
  24 * 82562GT 10/100 Network Connection
  25 * 82562GT-2 10/100 Network Connection
  26 * 82562V 10/100 Network Connection
  27 * 82562V-2 10/100 Network Connection
  28 * 82566DC-2 Gigabit Network Connection
  29 * 82566DC Gigabit Network Connection
  30 * 82566DM-2 Gigabit Network Connection
  31 * 82566DM Gigabit Network Connection
  32 * 82566MC Gigabit Network Connection
  33 * 82566MM Gigabit Network Connection
  34 * 82567LM Gigabit Network Connection
  35 * 82567LF Gigabit Network Connection
  36 * 82567V Gigabit Network Connection
  37 * 82567LM-2 Gigabit Network Connection
  38 * 82567LF-2 Gigabit Network Connection
  39 * 82567V-2 Gigabit Network Connection
  40 * 82567LF-3 Gigabit Network Connection
  41 * 82567LM-3 Gigabit Network Connection
  42 * 82567LM-4 Gigabit Network Connection
  43 * 82577LM Gigabit Network Connection
  44 * 82577LC Gigabit Network Connection
  45 * 82578DM Gigabit Network Connection
  46 * 82578DC Gigabit Network Connection
  47 * 82579LM Gigabit Network Connection
  48 * 82579V Gigabit Network Connection
  49 * Ethernet Connection I217-LM
  50 * Ethernet Connection I217-V
  51 * Ethernet Connection I218-V
  52 * Ethernet Connection I218-LM
  53 * Ethernet Connection (2) I218-LM
  54 * Ethernet Connection (2) I218-V
  55 * Ethernet Connection (3) I218-LM
  56 * Ethernet Connection (3) I218-V
  57 */
  58
  59#include "e1000.h"
  60
  61/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
  62/* Offset 04h HSFSTS */
  63union ich8_hws_flash_status {
  64        struct ich8_hsfsts {
  65                u16 flcdone:1;  /* bit 0 Flash Cycle Done */
  66                u16 flcerr:1;   /* bit 1 Flash Cycle Error */
  67                u16 dael:1;     /* bit 2 Direct Access error Log */
  68                u16 berasesz:2; /* bit 4:3 Sector Erase Size */
  69                u16 flcinprog:1;        /* bit 5 flash cycle in Progress */
   70                u16 reserved1:2;        /* bit 7:6 Reserved */
   71                u16 reserved2:6;        /* bit 13:8 Reserved */
  72                u16 fldesvalid:1;       /* bit 14 Flash Descriptor Valid */
  73                u16 flockdn:1;  /* bit 15 Flash Config Lock-Down */
  74        } hsf_status;
  75        u16 regval;
  76};
  77
  78/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
  79/* Offset 06h FLCTL */
  80union ich8_hws_flash_ctrl {
  81        struct ich8_hsflctl {
  82                u16 flcgo:1;    /* 0 Flash Cycle Go */
  83                u16 flcycle:2;  /* 2:1 Flash Cycle */
  84                u16 reserved:5; /* 7:3 Reserved  */
  85                u16 fldbcount:2;        /* 9:8 Flash Data Byte Count */
  86                u16 flockdn:6;  /* 15:10 Reserved */
  87        } hsf_ctrl;
  88        u16 regval;
  89};
  90
  91/* ICH Flash Region Access Permissions */
  92union ich8_hws_flash_regacc {
  93        struct ich8_flracc {
  94                u32 grra:8;     /* 0:7 GbE region Read Access */
  95                u32 grwa:8;     /* 8:15 GbE region Write Access */
  96                u32 gmrag:8;    /* 23:16 GbE Master Read Access Grant */
  97                u32 gmwag:8;    /* 31:24 GbE Master Write Access Grant */
  98        } hsf_flregacc;
   99        u32 regval;
 100};
 101
 102/* ICH Flash Protected Region */
 103union ich8_flash_protected_range {
 104        struct ich8_pr {
 105                u32 base:13;    /* 0:12 Protected Range Base */
 106                u32 reserved1:2;        /* 13:14 Reserved */
 107                u32 rpe:1;      /* 15 Read Protection Enable */
 108                u32 limit:13;   /* 16:28 Protected Range Limit */
 109                u32 reserved2:2;        /* 29:30 Reserved */
 110                u32 wpe:1;      /* 31 Write Protection Enable */
 111        } range;
 112        u32 regval;
 113};
 114
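/* Illustrative sketch (editor example, not driver code): raw register values
 * are decoded through the unions above rather than with shift/mask macros.
 * Assuming the ICH_FLASH_HSFSTS offset and the er16flash() helper defined
 * later in this file, a flash-status check looks roughly like:
 *
 *        union ich8_hws_flash_status hsfsts;
 *
 *        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 *        if (!hsfsts.hsf_status.flcinprog && hsfsts.hsf_status.flcdone)
 *                ... the previous flash cycle has completed ...
 */
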
 115static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
 116static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
 117static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
 118static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
 119                                                u32 offset, u8 byte);
 120static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
 121                                         u8 *data);
 122static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
 123                                         u16 *data);
 124static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 125                                         u8 size, u16 *data);
 126static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
 127                                           u32 *data);
 128static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
 129                                          u32 offset, u32 *data);
 130static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
 131                                            u32 offset, u32 data);
 132static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
 133                                                 u32 offset, u32 dword);
 134static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
 135static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
 136static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
 137static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
 138static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
 139static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
 140static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
 141static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
 142static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
 143static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
 144static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
 145static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
 146static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
 147static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 148static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
 149static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
 150static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
 151static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
 152static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
 153static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 154static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
 155static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
 156static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
 157static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
 158
 159static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 160{
 161        return readw(hw->flash_address + reg);
 162}
 163
 164static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
 165{
 166        return readl(hw->flash_address + reg);
 167}
 168
 169static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
 170{
 171        writew(val, hw->flash_address + reg);
 172}
 173
 174static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
 175{
 176        writel(val, hw->flash_address + reg);
 177}
 178
 179#define er16flash(reg)          __er16flash(hw, (reg))
 180#define er32flash(reg)          __er32flash(hw, (reg))
 181#define ew16flash(reg, val)     __ew16flash(hw, (reg), (val))
 182#define ew32flash(reg, val)     __ew32flash(hw, (reg), (val))
 183
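/* Usage note (editor illustration): like er32()/ew32() for MAC registers,
 * these flash accessors assume a local 'hw' pointer is in scope, e.g.
 *
 *        u32 gfpreg = er32flash(ICH_FLASH_GFPREG);
 *        ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 *
 * ICH_FLASH_GFPREG and ICH_FLASH_HSFSTS are the register offsets used
 * elsewhere in this driver.
 */
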
 184/**
 185 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 186 *  @hw: pointer to the HW structure
 187 *
 188 *  Test access to the PHY registers by reading the PHY ID registers.  If
 189 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 190 *  otherwise assume the read PHY ID is correct if it is valid.
 191 *
 192 *  Assumes the sw/fw/hw semaphore is already acquired.
 193 **/
 194static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 195{
 196        u16 phy_reg = 0;
 197        u32 phy_id = 0;
 198        s32 ret_val = 0;
 199        u16 retry_count;
 200        u32 mac_reg = 0;
 201
 202        for (retry_count = 0; retry_count < 2; retry_count++) {
 203                ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
 204                if (ret_val || (phy_reg == 0xFFFF))
 205                        continue;
 206                phy_id = (u32)(phy_reg << 16);
 207
 208                ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
 209                if (ret_val || (phy_reg == 0xFFFF)) {
 210                        phy_id = 0;
 211                        continue;
 212                }
 213                phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
 214                break;
 215        }
 216
 217        if (hw->phy.id) {
 218                if (hw->phy.id == phy_id)
 219                        goto out;
 220        } else if (phy_id) {
 221                hw->phy.id = phy_id;
 222                hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
 223                goto out;
 224        }
 225
 226        /* In case the PHY needs to be in mdio slow mode,
 227         * set slow mode and try to get the PHY id again.
 228         */
 229        if (hw->mac.type < e1000_pch_lpt) {
 230                hw->phy.ops.release(hw);
 231                ret_val = e1000_set_mdio_slow_mode_hv(hw);
 232                if (!ret_val)
 233                        ret_val = e1000e_get_phy_id(hw);
 234                hw->phy.ops.acquire(hw);
 235        }
 236
 237        if (ret_val)
 238                return false;
 239out:
 240        if (hw->mac.type >= e1000_pch_lpt) {
 241                /* Only unforce SMBus if ME is not active */
 242                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
 243                        /* Unforce SMBus mode in PHY */
 244                        e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
 245                        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
 246                        e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
 247
 248                        /* Unforce SMBus mode in MAC */
 249                        mac_reg = er32(CTRL_EXT);
 250                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
 251                        ew32(CTRL_EXT, mac_reg);
 252                }
 253        }
 254
 255        return true;
 256}
 257
 258/**
 259 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 260 *  @hw: pointer to the HW structure
 261 *
 262 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 263 *  used to reset the PHY to a quiescent state when necessary.
 264 **/
 265static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
 266{
 267        u32 mac_reg;
 268
 269        /* Set Phy Config Counter to 50msec */
 270        mac_reg = er32(FEXTNVM3);
 271        mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
 272        mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
 273        ew32(FEXTNVM3, mac_reg);
 274
 275        /* Toggle LANPHYPC Value bit */
 276        mac_reg = er32(CTRL);
 277        mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
 278        mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
 279        ew32(CTRL, mac_reg);
 280        e1e_flush();
 281        usleep_range(10, 20);
 282        mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
 283        ew32(CTRL, mac_reg);
 284        e1e_flush();
 285
 286        if (hw->mac.type < e1000_pch_lpt) {
 287                msleep(50);
 288        } else {
 289                u16 count = 20;
 290
 291                do {
 292                        usleep_range(5000, 10000);
 293                } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
 294
 295                msleep(30);
 296        }
 297}
 298
 299/**
 300 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 301 *  @hw: pointer to the HW structure
 302 *
 303 *  Workarounds/flow necessary for PHY initialization during driver load
 304 *  and resume paths.
 305 **/
 306static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 307{
 308        struct e1000_adapter *adapter = hw->adapter;
 309        u32 mac_reg, fwsm = er32(FWSM);
 310        s32 ret_val;
 311
 312        /* Gate automatic PHY configuration by hardware on managed and
 313         * non-managed 82579 and newer adapters.
 314         */
 315        e1000_gate_hw_phy_config_ich8lan(hw, true);
 316
 317        /* It is not possible to be certain of the current state of ULP
 318         * so forcibly disable it.
 319         */
 320        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
 321        e1000_disable_ulp_lpt_lp(hw, true);
 322
 323        ret_val = hw->phy.ops.acquire(hw);
 324        if (ret_val) {
 325                e_dbg("Failed to initialize PHY flow\n");
 326                goto out;
 327        }
 328
 329        /* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
 330         * inaccessible and resetting the PHY is not blocked, toggle the
 331         * LANPHYPC Value bit to force the interconnect to PCIe mode.
 332         */
 333        switch (hw->mac.type) {
 334        case e1000_pch_lpt:
 335        case e1000_pch_spt:
 336        case e1000_pch_cnp:
 337                if (e1000_phy_is_accessible_pchlan(hw))
 338                        break;
 339
 340                /* Before toggling LANPHYPC, see if PHY is accessible by
 341                 * forcing MAC to SMBus mode first.
 342                 */
 343                mac_reg = er32(CTRL_EXT);
 344                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
 345                ew32(CTRL_EXT, mac_reg);
 346
 347                /* Wait 50 milliseconds for MAC to finish any retries
 348                 * that it might be trying to perform from previous
 349                 * attempts to acknowledge any phy read requests.
 350                 */
 351                msleep(50);
 352
 353                /* fall-through */
 354        case e1000_pch2lan:
 355                if (e1000_phy_is_accessible_pchlan(hw))
 356                        break;
 357
 358                /* fall-through */
 359        case e1000_pchlan:
 360                if ((hw->mac.type == e1000_pchlan) &&
 361                    (fwsm & E1000_ICH_FWSM_FW_VALID))
 362                        break;
 363
 364                if (hw->phy.ops.check_reset_block(hw)) {
 365                        e_dbg("Required LANPHYPC toggle blocked by ME\n");
 366                        ret_val = -E1000_ERR_PHY;
 367                        break;
 368                }
 369
 370                /* Toggle LANPHYPC Value bit */
 371                e1000_toggle_lanphypc_pch_lpt(hw);
 372                if (hw->mac.type >= e1000_pch_lpt) {
 373                        if (e1000_phy_is_accessible_pchlan(hw))
 374                                break;
 375
 376                        /* Toggling LANPHYPC brings the PHY out of SMBus mode
 377                         * so ensure that the MAC is also out of SMBus mode
 378                         */
 379                        mac_reg = er32(CTRL_EXT);
 380                        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
 381                        ew32(CTRL_EXT, mac_reg);
 382
 383                        if (e1000_phy_is_accessible_pchlan(hw))
 384                                break;
 385
 386                        ret_val = -E1000_ERR_PHY;
 387                }
 388                break;
 389        default:
 390                break;
 391        }
 392
 393        hw->phy.ops.release(hw);
 394        if (!ret_val) {
 395
 396                /* Check to see if able to reset PHY.  Print error if not */
 397                if (hw->phy.ops.check_reset_block(hw)) {
 398                        e_err("Reset blocked by ME\n");
 399                        goto out;
 400                }
 401
  402                /* Reset the PHY before any access to it.  Doing so ensures
 403                 * that the PHY is in a known good state before we read/write
 404                 * PHY registers.  The generic reset is sufficient here,
 405                 * because we haven't determined the PHY type yet.
 406                 */
 407                ret_val = e1000e_phy_hw_reset_generic(hw);
 408                if (ret_val)
 409                        goto out;
 410
 411                /* On a successful reset, possibly need to wait for the PHY
 412                 * to quiesce to an accessible state before returning control
 413                 * to the calling function.  If the PHY does not quiesce, then
 414                 * return E1000E_BLK_PHY_RESET, as this is the condition that
  415                 * the PHY is in.
 416                 */
 417                ret_val = hw->phy.ops.check_reset_block(hw);
 418                if (ret_val)
 419                        e_err("ME blocked access to PHY after reset\n");
 420        }
 421
 422out:
 423        /* Ungate automatic PHY configuration on non-managed 82579 */
 424        if ((hw->mac.type == e1000_pch2lan) &&
 425            !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
 426                usleep_range(10000, 20000);
 427                e1000_gate_hw_phy_config_ich8lan(hw, false);
 428        }
 429
 430        return ret_val;
 431}
 432
 433/**
 434 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 435 *  @hw: pointer to the HW structure
 436 *
 437 *  Initialize family-specific PHY parameters and function pointers.
 438 **/
 439static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 440{
 441        struct e1000_phy_info *phy = &hw->phy;
 442        s32 ret_val;
 443
 444        phy->addr = 1;
 445        phy->reset_delay_us = 100;
 446
 447        phy->ops.set_page = e1000_set_page_igp;
 448        phy->ops.read_reg = e1000_read_phy_reg_hv;
 449        phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
 450        phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
 451        phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
 452        phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
 453        phy->ops.write_reg = e1000_write_phy_reg_hv;
 454        phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
 455        phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
 456        phy->ops.power_up = e1000_power_up_phy_copper;
 457        phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
 458        phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 459
 460        phy->id = e1000_phy_unknown;
 461
 462        ret_val = e1000_init_phy_workarounds_pchlan(hw);
 463        if (ret_val)
 464                return ret_val;
 465
 466        if (phy->id == e1000_phy_unknown)
 467                switch (hw->mac.type) {
 468                default:
 469                        ret_val = e1000e_get_phy_id(hw);
 470                        if (ret_val)
 471                                return ret_val;
 472                        if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
 473                                break;
 474                        /* fall-through */
 475                case e1000_pch2lan:
 476                case e1000_pch_lpt:
 477                case e1000_pch_spt:
 478                case e1000_pch_cnp:
 479                        /* In case the PHY needs to be in mdio slow mode,
 480                         * set slow mode and try to get the PHY id again.
 481                         */
 482                        ret_val = e1000_set_mdio_slow_mode_hv(hw);
 483                        if (ret_val)
 484                                return ret_val;
 485                        ret_val = e1000e_get_phy_id(hw);
 486                        if (ret_val)
 487                                return ret_val;
 488                        break;
 489                }
 490        phy->type = e1000e_get_phy_type_from_id(phy->id);
 491
 492        switch (phy->type) {
 493        case e1000_phy_82577:
 494        case e1000_phy_82579:
 495        case e1000_phy_i217:
 496                phy->ops.check_polarity = e1000_check_polarity_82577;
 497                phy->ops.force_speed_duplex =
 498                    e1000_phy_force_speed_duplex_82577;
 499                phy->ops.get_cable_length = e1000_get_cable_length_82577;
 500                phy->ops.get_info = e1000_get_phy_info_82577;
 501                phy->ops.commit = e1000e_phy_sw_reset;
 502                break;
 503        case e1000_phy_82578:
 504                phy->ops.check_polarity = e1000_check_polarity_m88;
 505                phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
 506                phy->ops.get_cable_length = e1000e_get_cable_length_m88;
 507                phy->ops.get_info = e1000e_get_phy_info_m88;
 508                break;
 509        default:
 510                ret_val = -E1000_ERR_PHY;
 511                break;
 512        }
 513
 514        return ret_val;
 515}
 516
 517/**
 518 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 519 *  @hw: pointer to the HW structure
 520 *
 521 *  Initialize family-specific PHY parameters and function pointers.
 522 **/
 523static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
 524{
 525        struct e1000_phy_info *phy = &hw->phy;
 526        s32 ret_val;
 527        u16 i = 0;
 528
 529        phy->addr = 1;
 530        phy->reset_delay_us = 100;
 531
 532        phy->ops.power_up = e1000_power_up_phy_copper;
 533        phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
 534
 535        /* We may need to do this twice - once for IGP and if that fails,
 536         * we'll set BM func pointers and try again
 537         */
 538        ret_val = e1000e_determine_phy_address(hw);
 539        if (ret_val) {
 540                phy->ops.write_reg = e1000e_write_phy_reg_bm;
 541                phy->ops.read_reg = e1000e_read_phy_reg_bm;
 542                ret_val = e1000e_determine_phy_address(hw);
 543                if (ret_val) {
 544                        e_dbg("Cannot determine PHY addr. Erroring out\n");
 545                        return ret_val;
 546                }
 547        }
 548
 549        phy->id = 0;
 550        while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
 551               (i++ < 100)) {
 552                usleep_range(1000, 2000);
 553                ret_val = e1000e_get_phy_id(hw);
 554                if (ret_val)
 555                        return ret_val;
 556        }
 557
 558        /* Verify phy id */
 559        switch (phy->id) {
 560        case IGP03E1000_E_PHY_ID:
 561                phy->type = e1000_phy_igp_3;
 562                phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 563                phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
 564                phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
 565                phy->ops.get_info = e1000e_get_phy_info_igp;
 566                phy->ops.check_polarity = e1000_check_polarity_igp;
 567                phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
 568                break;
 569        case IFE_E_PHY_ID:
 570        case IFE_PLUS_E_PHY_ID:
 571        case IFE_C_E_PHY_ID:
 572                phy->type = e1000_phy_ife;
 573                phy->autoneg_mask = E1000_ALL_NOT_GIG;
 574                phy->ops.get_info = e1000_get_phy_info_ife;
 575                phy->ops.check_polarity = e1000_check_polarity_ife;
 576                phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
 577                break;
 578        case BME1000_E_PHY_ID:
 579                phy->type = e1000_phy_bm;
 580                phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 581                phy->ops.read_reg = e1000e_read_phy_reg_bm;
 582                phy->ops.write_reg = e1000e_write_phy_reg_bm;
 583                phy->ops.commit = e1000e_phy_sw_reset;
 584                phy->ops.get_info = e1000e_get_phy_info_m88;
 585                phy->ops.check_polarity = e1000_check_polarity_m88;
 586                phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
 587                break;
 588        default:
 589                return -E1000_ERR_PHY;
 590        }
 591
 592        return 0;
 593}
 594
 595/**
 596 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 597 *  @hw: pointer to the HW structure
 598 *
 599 *  Initialize family-specific NVM parameters and function
 600 *  pointers.
 601 **/
 602static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 603{
 604        struct e1000_nvm_info *nvm = &hw->nvm;
 605        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 606        u32 gfpreg, sector_base_addr, sector_end_addr;
 607        u16 i;
 608        u32 nvm_size;
 609
 610        nvm->type = e1000_nvm_flash_sw;
 611
 612        if (hw->mac.type >= e1000_pch_spt) {
 613                /* in SPT, gfpreg doesn't exist. NVM size is taken from the
 614                 * STRAP register. This is because in SPT the GbE Flash region
 615                 * is no longer accessed through the flash registers. Instead,
 616                 * the mechanism has changed, and the Flash region access
 617                 * registers are now implemented in GbE memory space.
 618                 */
 619                nvm->flash_base_addr = 0;
 620                nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
 621                    * NVM_SIZE_MULTIPLIER;
 622                nvm->flash_bank_size = nvm_size / 2;
 623                /* Adjust to word count */
 624                nvm->flash_bank_size /= sizeof(u16);
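                /* Worked example (editor illustration): assuming
                 * NVM_SIZE_MULTIPLIER is 4096 bytes, a 5-bit strap field of
                 * 0x7 gives (0x7 + 1) * 4096 = 32 KB of NVM, i.e. a 16 KB
                 * bank holding 8192 16-bit words.
                 */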
 625                /* Set the base address for flash register access */
 626                hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
 627        } else {
 628                /* Can't read flash registers if register set isn't mapped. */
 629                if (!hw->flash_address) {
 630                        e_dbg("ERROR: Flash registers not mapped\n");
 631                        return -E1000_ERR_CONFIG;
 632                }
 633
 634                gfpreg = er32flash(ICH_FLASH_GFPREG);
 635
 636                /* sector_X_addr is a "sector"-aligned address (4096 bytes)
 637                 * Add 1 to sector_end_addr since this sector is included in
 638                 * the overall size.
 639                 */
 640                sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
 641                sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
 642
 643                /* flash_base_addr is byte-aligned */
 644                nvm->flash_base_addr = sector_base_addr
 645                    << FLASH_SECTOR_ADDR_SHIFT;
 646
 647                /* find total size of the NVM, then cut in half since the total
 648                 * size represents two separate NVM banks.
 649                 */
 650                nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
 651                                        << FLASH_SECTOR_ADDR_SHIFT);
 652                nvm->flash_bank_size /= 2;
 653                /* Adjust to word count */
 654                nvm->flash_bank_size /= sizeof(u16);
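                /* Worked example (editor illustration): gfpreg = 0x000F0001
                 * yields sector_base_addr = 1 and sector_end_addr = 0x10, so
                 * flash_base_addr = 0x1000 and, assuming 4 KB sectors
                 * (FLASH_SECTOR_ADDR_SHIFT = 12), flash_bank_size =
                 * (0xF << 12) / 2 / 2 = 0x3C00 words per bank.
                 */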
 655        }
 656
 657        nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
 658
 659        /* Clear shadow ram */
 660        for (i = 0; i < nvm->word_size; i++) {
 661                dev_spec->shadow_ram[i].modified = false;
 662                dev_spec->shadow_ram[i].value = 0xFFFF;
 663        }
 664
 665        return 0;
 666}
 667
 668/**
 669 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 670 *  @hw: pointer to the HW structure
 671 *
 672 *  Initialize family-specific MAC parameters and function
 673 *  pointers.
 674 **/
 675static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
 676{
 677        struct e1000_mac_info *mac = &hw->mac;
 678
 679        /* Set media type function pointer */
 680        hw->phy.media_type = e1000_media_type_copper;
 681
 682        /* Set mta register count */
 683        mac->mta_reg_count = 32;
 684        /* Set rar entry count */
 685        mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
 686        if (mac->type == e1000_ich8lan)
 687                mac->rar_entry_count--;
 688        /* FWSM register */
 689        mac->has_fwsm = true;
 690        /* ARC subsystem not supported */
 691        mac->arc_subsystem_valid = false;
 692        /* Adaptive IFS supported */
 693        mac->adaptive_ifs = true;
 694
 695        /* LED and other operations */
 696        switch (mac->type) {
 697        case e1000_ich8lan:
 698        case e1000_ich9lan:
 699        case e1000_ich10lan:
 700                /* check management mode */
 701                mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
 702                /* ID LED init */
 703                mac->ops.id_led_init = e1000e_id_led_init_generic;
 704                /* blink LED */
 705                mac->ops.blink_led = e1000e_blink_led_generic;
 706                /* setup LED */
 707                mac->ops.setup_led = e1000e_setup_led_generic;
 708                /* cleanup LED */
 709                mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
 710                /* turn on/off LED */
 711                mac->ops.led_on = e1000_led_on_ich8lan;
 712                mac->ops.led_off = e1000_led_off_ich8lan;
 713                break;
 714        case e1000_pch2lan:
 715                mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
 716                mac->ops.rar_set = e1000_rar_set_pch2lan;
 717                /* fall-through */
 718        case e1000_pch_lpt:
 719        case e1000_pch_spt:
 720        case e1000_pch_cnp:
 721        case e1000_pchlan:
 722                /* check management mode */
 723                mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
 724                /* ID LED init */
 725                mac->ops.id_led_init = e1000_id_led_init_pchlan;
 726                /* setup LED */
 727                mac->ops.setup_led = e1000_setup_led_pchlan;
 728                /* cleanup LED */
 729                mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
 730                /* turn on/off LED */
 731                mac->ops.led_on = e1000_led_on_pchlan;
 732                mac->ops.led_off = e1000_led_off_pchlan;
 733                break;
 734        default:
 735                break;
 736        }
 737
 738        if (mac->type >= e1000_pch_lpt) {
 739                mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
 740                mac->ops.rar_set = e1000_rar_set_pch_lpt;
 741                mac->ops.setup_physical_interface =
 742                    e1000_setup_copper_link_pch_lpt;
 743                mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
 744        }
 745
 746        /* Enable PCS Lock-loss workaround for ICH8 */
 747        if (mac->type == e1000_ich8lan)
 748                e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
 749
 750        return 0;
 751}
 752
 753/**
 754 *  __e1000_access_emi_reg_locked - Read/write EMI register
 755 *  @hw: pointer to the HW structure
  756 *  @address: EMI address to program
 757 *  @data: pointer to value to read/write from/to the EMI address
 758 *  @read: boolean flag to indicate read or write
 759 *
 760 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
 761 **/
 762static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
 763                                         u16 *data, bool read)
 764{
 765        s32 ret_val;
 766
 767        ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
 768        if (ret_val)
 769                return ret_val;
 770
 771        if (read)
 772                ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
 773        else
 774                ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
 775
 776        return ret_val;
 777}
 778
 779/**
 780 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 781 *  @hw: pointer to the HW structure
 782 *  @addr: EMI address to program
 783 *  @data: value to be read from the EMI address
 784 *
 785 *  Assumes the SW/FW/HW Semaphore is already acquired.
 786 **/
 787s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
 788{
 789        return __e1000_access_emi_reg_locked(hw, addr, data, true);
 790}
 791
 792/**
 793 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 794 *  @hw: pointer to the HW structure
 795 *  @addr: EMI address to program
 796 *  @data: value to be written to the EMI address
 797 *
 798 *  Assumes the SW/FW/HW Semaphore is already acquired.
 799 **/
 800s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
 801{
 802        return __e1000_access_emi_reg_locked(hw, addr, &data, false);
 803}
 804
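/* Illustrative call pattern (editor sketch, not driver code): the _locked
 * EMI helpers must be bracketed by the SW/FW/HW semaphore, e.g.
 *
 *        ret_val = hw->phy.ops.acquire(hw);
 *        if (ret_val)
 *                return ret_val;
 *        ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, &data);
 *        hw->phy.ops.release(hw);
 *
 * This is the same acquire/release bracketing used by e1000_set_eee_pchlan()
 * below.
 */
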
 805/**
 806 *  e1000_set_eee_pchlan - Enable/disable EEE support
 807 *  @hw: pointer to the HW structure
 808 *
 809 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 810 *  the link and the EEE capabilities of the link partner.  The LPI Control
 811 *  register bits will remain set only if/when link is up.
 812 *
 813 *  EEE LPI must not be asserted earlier than one second after link is up.
 814 *  On 82579, EEE LPI should not be enabled until such time otherwise there
 815 *  can be link issues with some switches.  Other devices can have EEE LPI
 816 *  enabled immediately upon link up since they have a timer in hardware which
 817 *  prevents LPI from being asserted too early.
 818 **/
 819s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 820{
 821        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 822        s32 ret_val;
 823        u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
 824
 825        switch (hw->phy.type) {
 826        case e1000_phy_82579:
 827                lpa = I82579_EEE_LP_ABILITY;
 828                pcs_status = I82579_EEE_PCS_STATUS;
 829                adv_addr = I82579_EEE_ADVERTISEMENT;
 830                break;
 831        case e1000_phy_i217:
 832                lpa = I217_EEE_LP_ABILITY;
 833                pcs_status = I217_EEE_PCS_STATUS;
 834                adv_addr = I217_EEE_ADVERTISEMENT;
 835                break;
 836        default:
 837                return 0;
 838        }
 839
 840        ret_val = hw->phy.ops.acquire(hw);
 841        if (ret_val)
 842                return ret_val;
 843
 844        ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
 845        if (ret_val)
 846                goto release;
 847
 848        /* Clear bits that enable EEE in various speeds */
 849        lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
 850
 851        /* Enable EEE if not disabled by user */
 852        if (!dev_spec->eee_disable) {
 853                /* Save off link partner's EEE ability */
 854                ret_val = e1000_read_emi_reg_locked(hw, lpa,
 855                                                    &dev_spec->eee_lp_ability);
 856                if (ret_val)
 857                        goto release;
 858
 859                /* Read EEE advertisement */
 860                ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
 861                if (ret_val)
 862                        goto release;
 863
 864                /* Enable EEE only for speeds in which the link partner is
 865                 * EEE capable and for which we advertise EEE.
 866                 */
 867                if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
 868                        lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
 869
 870                if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
 871                        e1e_rphy_locked(hw, MII_LPA, &data);
 872                        if (data & LPA_100FULL)
 873                                lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
 874                        else
 875                                /* EEE is not supported in 100Half, so ignore
 876                                 * partner's EEE in 100 ability if full-duplex
 877                                 * is not advertised.
 878                                 */
 879                                dev_spec->eee_lp_ability &=
 880                                    ~I82579_EEE_100_SUPPORTED;
 881                }
 882        }
 883
 884        if (hw->phy.type == e1000_phy_82579) {
 885                ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
 886                                                    &data);
 887                if (ret_val)
 888                        goto release;
 889
 890                data &= ~I82579_LPI_100_PLL_SHUT;
 891                ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
 892                                                     data);
 893        }
 894
 895        /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
 896        ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
 897        if (ret_val)
 898                goto release;
 899
 900        ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
 901release:
 902        hw->phy.ops.release(hw);
 903
 904        return ret_val;
 905}
 906
 907/**
 908 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 909 *  @hw:   pointer to the HW structure
 910 *  @link: link up bool flag
 911 *
  912 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications,
  913 *  preventing further DMA write requests.  Work around the issue by disabling
  914 *  the de-assertion of the clock request when in 1Gbps mode.
 915 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 916 *  speeds in order to avoid Tx hangs.
 917 **/
 918static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
 919{
 920        u32 fextnvm6 = er32(FEXTNVM6);
 921        u32 status = er32(STATUS);
 922        s32 ret_val = 0;
 923        u16 reg;
 924
 925        if (link && (status & E1000_STATUS_SPEED_1000)) {
 926                ret_val = hw->phy.ops.acquire(hw);
 927                if (ret_val)
 928                        return ret_val;
 929
 930                ret_val =
 931                    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
 932                                                &reg);
 933                if (ret_val)
 934                        goto release;
 935
 936                ret_val =
 937                    e1000e_write_kmrn_reg_locked(hw,
 938                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
 939                                                 reg &
 940                                                 ~E1000_KMRNCTRLSTA_K1_ENABLE);
 941                if (ret_val)
 942                        goto release;
 943
 944                usleep_range(10, 20);
 945
 946                ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
 947
 948                ret_val =
 949                    e1000e_write_kmrn_reg_locked(hw,
 950                                                 E1000_KMRNCTRLSTA_K1_CONFIG,
 951                                                 reg);
 952release:
 953                hw->phy.ops.release(hw);
 954        } else {
 955                /* clear FEXTNVM6 bit 8 on link down or 10/100 */
 956                fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
 957
 958                if ((hw->phy.revision > 5) || !link ||
 959                    ((status & E1000_STATUS_SPEED_100) &&
 960                     (status & E1000_STATUS_FD)))
 961                        goto update_fextnvm6;
 962
 963                ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
 964                if (ret_val)
 965                        return ret_val;
 966
 967                /* Clear link status transmit timeout */
 968                reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
 969
 970                if (status & E1000_STATUS_SPEED_100) {
 971                        /* Set inband Tx timeout to 5x10us for 100Half */
 972                        reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
 973
 974                        /* Do not extend the K1 entry latency for 100Half */
 975                        fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
 976                } else {
 977                        /* Set inband Tx timeout to 50x10us for 10Full/Half */
 978                        reg |= 50 <<
 979                            I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
 980
 981                        /* Extend the K1 entry latency for 10 Mbps */
 982                        fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
 983                }
 984
 985                ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
 986                if (ret_val)
 987                        return ret_val;
 988
 989update_fextnvm6:
 990                ew32(FEXTNVM6, fextnvm6);
 991        }
 992
 993        return ret_val;
 994}
 995
 996/**
 997 *  e1000_platform_pm_pch_lpt - Set platform power management values
 998 *  @hw: pointer to the HW structure
 999 *  @link: bool indicating link status
1000 *
1001 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1002 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1003 *  when link is up (which must not exceed the maximum latency supported
1004 *  by the platform), otherwise specify there is no LTR requirement.
1005 *  Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
1006 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
1007 *  Capability register set, on this device LTR is set by writing the
1008 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1009 *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
1010 *  message to the PMC.
1011 **/
1012static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1013{
1014        u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1015            link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1016        u16 lat_enc = 0;        /* latency encoded */
1017
1018        if (link) {
1019                u16 speed, duplex, scale = 0;
1020                u16 max_snoop, max_nosnoop;
1021                u16 max_ltr_enc;        /* max LTR latency encoded */
1022                u64 value;
1023                u32 rxa;
1024
1025                if (!hw->adapter->max_frame_size) {
1026                        e_dbg("max_frame_size not set.\n");
1027                        return -E1000_ERR_CONFIG;
1028                }
1029
1030                hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1031                if (!speed) {
1032                        e_dbg("Speed not set.\n");
1033                        return -E1000_ERR_CONFIG;
1034                }
1035
1036                /* Rx Packet Buffer Allocation size (KB) */
1037                rxa = er32(PBA) & E1000_PBA_RXA_MASK;
1038
1039                /* Determine the maximum latency tolerated by the device.
1040                 *
1041                 * Per the PCIe spec, the tolerated latencies are encoded as
1042                 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1043                 * a 10-bit value (0-1023) to provide a range from 1 ns to
1044                 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
1045                 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1046                 */
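                /* Worked example (editor illustration): a tolerated latency
                 * of 100000 ns overflows the 10-bit value field, so the loop
                 * below scales it twice: DIV_ROUND_UP(100000, 32) = 3125,
                 * then DIV_ROUND_UP(3125, 32) = 98, giving scale = 2 and an
                 * encoded latency of 98 * 2^10 ns (~100 us).
                 */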
1047                rxa *= 512;
1048                value = (rxa > hw->adapter->max_frame_size) ?
1049                        (rxa - hw->adapter->max_frame_size) * (16000 / speed) :
1050                        0;
1051
1052                while (value > PCI_LTR_VALUE_MASK) {
1053                        scale++;
1054                        value = DIV_ROUND_UP(value, BIT(5));
1055                }
1056                if (scale > E1000_LTRV_SCALE_MAX) {
1057                        e_dbg("Invalid LTR latency scale %d\n", scale);
1058                        return -E1000_ERR_CONFIG;
1059                }
1060                lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
1061
1062                /* Determine the maximum latency tolerated by the platform */
1063                pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
1064                                     &max_snoop);
1065                pci_read_config_word(hw->adapter->pdev,
1066                                     E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1067                max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
1068
1069                if (lat_enc > max_ltr_enc)
1070                        lat_enc = max_ltr_enc;
1071        }
1072
1073        /* Set Snoop and No-Snoop latencies the same */
1074        reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1075        ew32(LTRV, reg);
1076
1077        return 0;
1078}
1079
1080/**
1081 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1082 *  @hw: pointer to the HW structure
1083 *  @to_sx: boolean indicating a system power state transition to Sx
1084 *
1085 *  When link is down, configure ULP mode to significantly reduce the power
1086 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1087 *  ME firmware to start the ULP configuration.  If not on an ME enabled
1088 *  system, configure the ULP mode by software.
 1089 **/
1090s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1091{
1092        u32 mac_reg;
1093        s32 ret_val = 0;
1094        u16 phy_reg;
1095        u16 oem_reg = 0;
1096
1097        if ((hw->mac.type < e1000_pch_lpt) ||
1098            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1099            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1100            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1101            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1102            (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1103                return 0;
1104
1105        if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1106                /* Request ME configure ULP mode in the PHY */
1107                mac_reg = er32(H2ME);
1108                mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1109                ew32(H2ME, mac_reg);
1110
1111                goto out;
1112        }
1113
1114        if (!to_sx) {
1115                int i = 0;
1116
1117                /* Poll up to 5 seconds for Cable Disconnected indication */
1118                while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1119                        /* Bail if link is re-acquired */
1120                        if (er32(STATUS) & E1000_STATUS_LU)
1121                                return -E1000_ERR_PHY;
1122
1123                        if (i++ == 100)
1124                                break;
1125
1126                        msleep(50);
1127                }
1128                e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
1129                      (er32(FEXT) &
1130                       E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
1131        }
1132
1133        ret_val = hw->phy.ops.acquire(hw);
1134        if (ret_val)
1135                goto out;
1136
1137        /* Force SMBus mode in PHY */
1138        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1139        if (ret_val)
1140                goto release;
1141        phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1142        e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1143
1144        /* Force SMBus mode in MAC */
1145        mac_reg = er32(CTRL_EXT);
1146        mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1147        ew32(CTRL_EXT, mac_reg);
1148
 1149        /* Si workaround for ULP entry flow on i217/rev6 h/w.  Enable
1150         * LPLU and disable Gig speed when entering ULP
1151         */
1152        if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1153                ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1154                                                       &oem_reg);
1155                if (ret_val)
1156                        goto release;
1157
1158                phy_reg = oem_reg;
1159                phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1160
1161                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1162                                                        phy_reg);
1163
1164                if (ret_val)
1165                        goto release;
1166        }
1167
1168        /* Set Inband ULP Exit, Reset to SMBus mode and
1169         * Disable SMBus Release on PERST# in PHY
1170         */
1171        ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1172        if (ret_val)
1173                goto release;
1174        phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1175                    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1176        if (to_sx) {
1177                if (er32(WUFC) & E1000_WUFC_LNKC)
1178                        phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1179                else
1180                        phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1181
1182                phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1183                phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1184        } else {
1185                phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1186                phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1187                phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1188        }
1189        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1190
1191        /* Set Disable SMBus Release on PERST# in MAC */
1192        mac_reg = er32(FEXTNVM7);
1193        mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1194        ew32(FEXTNVM7, mac_reg);
1195
1196        /* Commit ULP changes in PHY by starting auto ULP configuration */
1197        phy_reg |= I218_ULP_CONFIG1_START;
1198        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1199
1200        if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1201            to_sx && (er32(STATUS) & E1000_STATUS_LU)) {
1202                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1203                                                        oem_reg);
1204                if (ret_val)
1205                        goto release;
1206        }
1207
1208release:
1209        hw->phy.ops.release(hw);
1210out:
1211        if (ret_val)
1212                e_dbg("Error in ULP enable flow: %d\n", ret_val);
1213        else
1214                hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1215
1216        return ret_val;
1217}
1218
1219/**
1220 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1221 *  @hw: pointer to the HW structure
1222 *  @force: boolean indicating whether or not to force disabling ULP
1223 *
1224 *  Un-configure ULP mode when link is up, the system is transitioned from
1225 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1226 *  system, poll for an indication from ME that ULP has been un-configured.
1227 *  If not on an ME enabled system, un-configure the ULP mode by software.
1228 *
1229 *  During nominal operation, this function is called when link is acquired
1230 *  to disable ULP mode (force=false); otherwise, for example when unloading
1231 *  the driver or during Sx->S0 transitions, this is called with force=true
1232 *  to forcibly disable ULP.
 1233 **/
1234static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1235{
1236        s32 ret_val = 0;
1237        u32 mac_reg;
1238        u16 phy_reg;
1239        int i = 0;
1240
1241        if ((hw->mac.type < e1000_pch_lpt) ||
1242            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1243            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1244            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1245            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1246            (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1247                return 0;
1248
1249        if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1250                if (force) {
1251                        /* Request ME un-configure ULP mode in the PHY */
1252                        mac_reg = er32(H2ME);
1253                        mac_reg &= ~E1000_H2ME_ULP;
1254                        mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1255                        ew32(H2ME, mac_reg);
1256                }
1257
1258                /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1259                while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
1260                        if (i++ == 30) {
1261                                ret_val = -E1000_ERR_PHY;
1262                                goto out;
1263                        }
1264
1265                        usleep_range(10000, 20000);
1266                }
1267                e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1268
1269                if (force) {
1270                        mac_reg = er32(H2ME);
1271                        mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1272                        ew32(H2ME, mac_reg);
1273                } else {
1274                        /* Clear H2ME.ULP after ME ULP configuration */
1275                        mac_reg = er32(H2ME);
1276                        mac_reg &= ~E1000_H2ME_ULP;
1277                        ew32(H2ME, mac_reg);
1278                }
1279
1280                goto out;
1281        }
1282
1283        ret_val = hw->phy.ops.acquire(hw);
1284        if (ret_val)
1285                goto out;
1286
1287        if (force)
1288                /* Toggle LANPHYPC Value bit */
1289                e1000_toggle_lanphypc_pch_lpt(hw);
1290
1291        /* Unforce SMBus mode in PHY */
1292        ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1293        if (ret_val) {
1294                /* The MAC might be in PCIe mode, so temporarily force to
1295                 * SMBus mode in order to access the PHY.
1296                 */
1297                mac_reg = er32(CTRL_EXT);
1298                mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1299                ew32(CTRL_EXT, mac_reg);
1300
1301                msleep(50);
1302
1303                ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1304                                                       &phy_reg);
1305                if (ret_val)
1306                        goto release;
1307        }
1308        phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1309        e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1310
1311        /* Unforce SMBus mode in MAC */
1312        mac_reg = er32(CTRL_EXT);
1313        mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1314        ew32(CTRL_EXT, mac_reg);
1315
1316        /* When ULP mode was previously entered, K1 was disabled by the
1317         * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1318         */
1319        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1320        if (ret_val)
1321                goto release;
1322        phy_reg |= HV_PM_CTRL_K1_ENABLE;
1323        e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1324
1325        /* Clear ULP enabled configuration */
1326        ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1327        if (ret_val)
1328                goto release;
1329        phy_reg &= ~(I218_ULP_CONFIG1_IND |
1330                     I218_ULP_CONFIG1_STICKY_ULP |
1331                     I218_ULP_CONFIG1_RESET_TO_SMBUS |
1332                     I218_ULP_CONFIG1_WOL_HOST |
1333                     I218_ULP_CONFIG1_INBAND_EXIT |
1334                     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1335                     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1336                     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1337        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1338
1339        /* Commit ULP changes by starting auto ULP configuration */
1340        phy_reg |= I218_ULP_CONFIG1_START;
1341        e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1342
1343        /* Clear Disable SMBus Release on PERST# in MAC */
1344        mac_reg = er32(FEXTNVM7);
1345        mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1346        ew32(FEXTNVM7, mac_reg);
1347
1348release:
1349        hw->phy.ops.release(hw);
1350        if (force) {
1351                e1000_phy_hw_reset(hw);
1352                msleep(50);
1353        }
1354out:
1355        if (ret_val)
1356                e_dbg("Error in ULP disable flow: %d\n", ret_val);
1357        else
1358                hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1359
1360        return ret_val;
1361}
1362
1363/**
1364 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1365 *  @hw: pointer to the HW structure
1366 *
1367 *  Checks to see if the link status of the hardware has changed.  If a
1368 *  change in link status has been detected, then we read the PHY registers
1369 *  to get the current speed/duplex if link exists.
1370 **/
1371static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1372{
1373        struct e1000_mac_info *mac = &hw->mac;
1374        s32 ret_val, tipg_reg = 0;
1375        u16 emi_addr, emi_val = 0;
1376        bool link;
1377        u16 phy_reg;
1378
1379        /* We only want to go out to the PHY registers to see if Auto-Neg
1380         * has completed and/or if our link status has changed.  The
1381         * get_link_status flag is set upon receiving a Link Status
1382         * Change or Rx Sequence Error interrupt.
1383         */
1384        if (!mac->get_link_status)
1385                return 0;
1386
1387        /* First we want to see if the MII Status Register reports
1388         * link.  If so, then we want to get the current speed/duplex
1389         * of the PHY.
1390         */
1391        ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
1392        if (ret_val)
1393                return ret_val;
1394
1395        if (hw->mac.type == e1000_pchlan) {
1396                ret_val = e1000_k1_gig_workaround_hv(hw, link);
1397                if (ret_val)
1398                        return ret_val;
1399        }
1400
1401        /* When connected at 10Mbps half-duplex, some parts are excessively
1402         * aggressive, resulting in many collisions. To avoid this, increase
1403         * the IPG and reduce Rx latency in the PHY.
1404         */
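        /* In summary: at 10Mbps half-duplex the IPG transmit time (TIPG.IPGT)
         * is raised to 0xFF and the PHY Rx latency value is set to 0; on
         * pch_spt and later at full-duplex below 1000Mbps IPGT is set to 0xC;
         * otherwise the default IPGT of 0x08 is restored.  The chosen latency
         * value is written to the PHY via an EMI register further below.
         */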
1405        if ((hw->mac.type >= e1000_pch2lan) && link) {
1406                u16 speed, duplex;
1407
1408                e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex);
1409                tipg_reg = er32(TIPG);
1410                tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1411
1412                if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1413                        tipg_reg |= 0xFF;
1414                        /* Reduce Rx latency in analog PHY */
1415                        emi_val = 0;
1416                } else if (hw->mac.type >= e1000_pch_spt &&
1417                           duplex == FULL_DUPLEX && speed != SPEED_1000) {
1418                        tipg_reg |= 0xC;
1419                        emi_val = 1;
1420                } else {
1421
1422                        /* Roll back the default values */
1423                        tipg_reg |= 0x08;
1424                        emi_val = 1;
1425                }
1426
1427                ew32(TIPG, tipg_reg);
1428
1429                ret_val = hw->phy.ops.acquire(hw);
1430                if (ret_val)
1431                        return ret_val;
1432
1433                if (hw->mac.type == e1000_pch2lan)
1434                        emi_addr = I82579_RX_CONFIG;
1435                else
1436                        emi_addr = I217_RX_CONFIG;
1437                ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1438
1439                if (hw->mac.type >= e1000_pch_lpt) {
1440                        u16 phy_reg;
1441
1442                        e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg);
1443                        phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1444                        if (speed == SPEED_100 || speed == SPEED_10)
1445                                phy_reg |= 0x3E8;
1446                        else
1447                                phy_reg |= 0xFA;
1448                        e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
1449                }
1450                hw->phy.ops.release(hw);
1451
1452                if (ret_val)
1453                        return ret_val;
1454
1455                if (hw->mac.type >= e1000_pch_spt) {
1456                        u16 data;
1457                        u16 ptr_gap;
1458
1459                        if (speed == SPEED_1000) {
1460                                ret_val = hw->phy.ops.acquire(hw);
1461                                if (ret_val)
1462                                        return ret_val;
1463
1464                                ret_val = e1e_rphy_locked(hw,
1465                                                          PHY_REG(776, 20),
1466                                                          &data);
1467                                if (ret_val) {
1468                                        hw->phy.ops.release(hw);
1469                                        return ret_val;
1470                                }
1471
1472                                ptr_gap = (data & (0x3FF << 2)) >> 2;
1473                                if (ptr_gap < 0x18) {
1474                                        data &= ~(0x3FF << 2);
1475                                        data |= (0x18 << 2);
1476                                        ret_val =
1477                                            e1e_wphy_locked(hw,
1478                                                            PHY_REG(776, 20),
1479                                                            data);
1480                                }
1481                                hw->phy.ops.release(hw);
1482                                if (ret_val)
1483                                        return ret_val;
1484                        } else {
1485                                ret_val = hw->phy.ops.acquire(hw);
1486                                if (ret_val)
1487                                        return ret_val;
1488
1489                                ret_val = e1e_wphy_locked(hw,
1490                                                          PHY_REG(776, 20),
1491                                                          0xC023);
1492                                hw->phy.ops.release(hw);
1493                                if (ret_val)
1494                                        return ret_val;
1495
1496                        }
1497                }
1498        }
1499
1500        /* I217 Packet Loss issue:
1501         * ensure that FEXTNVM4 Beacon Duration is set correctly
1502         * on power up.
1503         * Set the Beacon Duration for I217 to 8 usec
1504         */
1505        if (hw->mac.type >= e1000_pch_lpt) {
1506                u32 mac_reg;
1507
1508                mac_reg = er32(FEXTNVM4);
1509                mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1510                mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1511                ew32(FEXTNVM4, mac_reg);
1512        }
1513
1514        /* Work around the I218 hang issue */
1515        if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1516            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1517            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
1518            (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
1519                ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1520                if (ret_val)
1521                        return ret_val;
1522        }
1523        if (hw->mac.type >= e1000_pch_lpt) {
1524                /* Set platform power management values for
1525                 * Latency Tolerance Reporting (LTR)
1526                 */
1527                ret_val = e1000_platform_pm_pch_lpt(hw, link);
1528                if (ret_val)
1529                        return ret_val;
1530        }
1531
1532        /* Clear link partner's EEE ability */
1533        hw->dev_spec.ich8lan.eee_lp_ability = 0;
1534
1535        if (hw->mac.type >= e1000_pch_lpt) {
1536                u32 fextnvm6 = er32(FEXTNVM6);
1537
1538                if (hw->mac.type == e1000_pch_spt) {
1539                        /* FEXTNVM6 K1-off workaround - for SPT only */
1540                        u32 pcieanacfg = er32(PCIEANACFG);
1541
1542                        if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1543                                fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1544                        else
1545                                fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1546                }
1547
1548                ew32(FEXTNVM6, fextnvm6);
1549        }
1550
1551        if (!link)
1552                return 0;       /* No link detected */
1553
1554        mac->get_link_status = false;
1555
1556        switch (hw->mac.type) {
1557        case e1000_pch2lan:
1558                ret_val = e1000_k1_workaround_lv(hw);
1559                if (ret_val)
1560                        return ret_val;
1561                /* fall-thru */
1562        case e1000_pchlan:
1563                if (hw->phy.type == e1000_phy_82578) {
1564                        ret_val = e1000_link_stall_workaround_hv(hw);
1565                        if (ret_val)
1566                                return ret_val;
1567                }
1568
1569                /* Workaround for PCHx parts in half-duplex:
1570                 * Set the number of preambles removed from the packet
1571                 * when it is passed from the PHY to the MAC to prevent
1572                 * the MAC from misinterpreting the packet type.
1573                 */
1574                e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1575                phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1576
1577                if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
1578                        phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1579
1580                e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1581                break;
1582        default:
1583                break;
1584        }
1585
1586        /* Check if there was a downshift; this must be checked
1587         * immediately after link-up.
1588         */
1589        e1000e_check_downshift(hw);
1590
1591        /* Enable/Disable EEE after link up */
1592        if (hw->phy.type > e1000_phy_82579) {
1593                ret_val = e1000_set_eee_pchlan(hw);
1594                if (ret_val)
1595                        return ret_val;
1596        }
1597
1598        /* If we are forcing speed/duplex, then we simply return since
1599         * we have already determined whether we have link or not.
1600         */
1601        if (!mac->autoneg)
1602                return -E1000_ERR_CONFIG;
1603
1604        /* Auto-Neg is enabled.  Auto Speed Detection takes care
1605         * of MAC speed/duplex configuration.  So we only need to
1606         * configure Collision Distance in the MAC.
1607         */
1608        mac->ops.config_collision_dist(hw);
1609
1610        /* Configure Flow Control now that Auto-Neg has completed.
1611         * First, we need to restore the desired flow control
1612         * settings because we may have had to re-autoneg with a
1613         * different link partner.
1614         */
1615        ret_val = e1000e_config_fc_after_link_up(hw);
1616        if (ret_val)
1617                e_dbg("Error configuring flow control\n");
1618
1619        return ret_val;
1620}
1621
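/**
 *  e1000_get_variants_ich8lan - Initialize part-specific parameters and quirks
 *  @adapter: board private structure
 *
 *  Initializes the MAC, NVM and PHY parameters for the detected part and
 *  applies per-part quirks such as disabling Jumbo Frame support or enabling
 *  the PCIM2PCI arbiter workaround for 82579 parts with ME enabled.
 **/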
1622static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
1623{
1624        struct e1000_hw *hw = &adapter->hw;
1625        s32 rc;
1626
1627        rc = e1000_init_mac_params_ich8lan(hw);
1628        if (rc)
1629                return rc;
1630
1631        rc = e1000_init_nvm_params_ich8lan(hw);
1632        if (rc)
1633                return rc;
1634
1635        switch (hw->mac.type) {
1636        case e1000_ich8lan:
1637        case e1000_ich9lan:
1638        case e1000_ich10lan:
1639                rc = e1000_init_phy_params_ich8lan(hw);
1640                break;
1641        case e1000_pchlan:
1642        case e1000_pch2lan:
1643        case e1000_pch_lpt:
1644        case e1000_pch_spt:
1645        case e1000_pch_cnp:
1646                rc = e1000_init_phy_params_pchlan(hw);
1647                break;
1648        default:
1649                break;
1650        }
1651        if (rc)
1652                return rc;
1653
1654        /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
1655         * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
1656         */
1657        if ((adapter->hw.phy.type == e1000_phy_ife) ||
1658            ((adapter->hw.mac.type >= e1000_pch2lan) &&
1659             (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
1660                adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
1661                adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
1662
1663                hw->mac.ops.blink_led = NULL;
1664        }
1665
1666        if ((adapter->hw.mac.type == e1000_ich8lan) &&
1667            (adapter->hw.phy.type != e1000_phy_ife))
1668                adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
1669
1670        /* Enable workaround for 82579 w/ ME enabled */
1671        if ((adapter->hw.mac.type == e1000_pch2lan) &&
1672            (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1673                adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
1674
1675        return 0;
1676}
1677
1678static DEFINE_MUTEX(nvm_mutex);
1679
1680/**
1681 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1682 *  @hw: pointer to the HW structure
1683 *
1684 *  Acquires the mutex for performing NVM operations.
1685 **/
1686static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1687{
1688        mutex_lock(&nvm_mutex);
1689
1690        return 0;
1691}
1692
1693/**
1694 *  e1000_release_nvm_ich8lan - Release NVM mutex
1695 *  @hw: pointer to the HW structure
1696 *
1697 *  Releases the mutex used while performing NVM operations.
1698 **/
1699static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1700{
1701        mutex_unlock(&nvm_mutex);
1702}
1703
1704/**
1705 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1706 *  @hw: pointer to the HW structure
1707 *
1708 *  Acquires the software control flag for performing PHY and select
1709 *  MAC CSR accesses.
1710 **/
1711static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1712{
1713        u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1714        s32 ret_val = 0;
1715
1716        if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
1717                             &hw->adapter->state)) {
1718                e_dbg("contention for Phy access\n");
1719                return -E1000_ERR_PHY;
1720        }
1721
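        /* Two-step handshake: first wait for any current owner to release the
         * EXTCNF_CTRL software flag, then request ownership by setting the
         * flag and wait for hardware/firmware to actually grant it.
         */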
1722        while (timeout) {
1723                extcnf_ctrl = er32(EXTCNF_CTRL);
1724                if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1725                        break;
1726
1727                mdelay(1);
1728                timeout--;
1729        }
1730
1731        if (!timeout) {
1732                e_dbg("SW has already locked the resource.\n");
1733                ret_val = -E1000_ERR_CONFIG;
1734                goto out;
1735        }
1736
1737        timeout = SW_FLAG_TIMEOUT;
1738
1739        extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1740        ew32(EXTCNF_CTRL, extcnf_ctrl);
1741
1742        while (timeout) {
1743                extcnf_ctrl = er32(EXTCNF_CTRL);
1744                if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1745                        break;
1746
1747                mdelay(1);
1748                timeout--;
1749        }
1750
1751        if (!timeout) {
1752                e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n",
1753                      er32(FWSM), extcnf_ctrl);
1754                extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1755                ew32(EXTCNF_CTRL, extcnf_ctrl);
1756                ret_val = -E1000_ERR_CONFIG;
1757                goto out;
1758        }
1759
1760out:
1761        if (ret_val)
1762                clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1763
1764        return ret_val;
1765}
1766
1767/**
1768 *  e1000_release_swflag_ich8lan - Release software control flag
1769 *  @hw: pointer to the HW structure
1770 *
1771 *  Releases the software control flag for performing PHY and select
1772 *  MAC CSR accesses.
1773 **/
1774static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1775{
1776        u32 extcnf_ctrl;
1777
1778        extcnf_ctrl = er32(EXTCNF_CTRL);
1779
1780        if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1781                extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1782                ew32(EXTCNF_CTRL, extcnf_ctrl);
1783        } else {
1784                e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
1785        }
1786
1787        clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1788}
1789
1790/**
1791 *  e1000_check_mng_mode_ich8lan - Checks management mode
1792 *  @hw: pointer to the HW structure
1793 *
1794 *  This checks if the adapter has any manageability enabled.
1795 *  This is a function pointer entry point only called by read/write
1796 *  routines for the PHY and NVM parts.
1797 **/
1798static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1799{
1800        u32 fwsm;
1801
1802        fwsm = er32(FWSM);
1803        return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1804                ((fwsm & E1000_FWSM_MODE_MASK) ==
1805                 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1806}
1807
1808/**
1809 *  e1000_check_mng_mode_pchlan - Checks management mode
1810 *  @hw: pointer to the HW structure
1811 *
1812 *  This checks if the adapter has iAMT enabled.
1813 *  This is a function pointer entry point only called by read/write
1814 *  routines for the PHY and NVM parts.
1815 **/
1816static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1817{
1818        u32 fwsm;
1819
1820        fwsm = er32(FWSM);
1821        return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1822            (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1823}
1824
1825/**
1826 *  e1000_rar_set_pch2lan - Set receive address register
1827 *  @hw: pointer to the HW structure
1828 *  @addr: pointer to the receive address
1829 *  @index: receive address array register
1830 *
1831 *  Sets the receive address array register at index to the address passed
1832 *  in by addr.  For 82579, RAR[0] is the base address register that is to
1833 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1834 *  Use SHRA[0-3] in place of those reserved for ME.
1835 **/
1836static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1837{
1838        u32 rar_low, rar_high;
1839
1840        /* HW expects these in little endian so we reverse the byte order
1841         * from network order (big endian) to little endian
1842         */
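        /* For example, the address 00:11:22:33:44:55 yields
         * rar_low = 0x33221100 and rar_high = 0x00005544 (before the AV bit
         * is set below).
         */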
1843        rar_low = ((u32)addr[0] |
1844                   ((u32)addr[1] << 8) |
1845                   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1846
1847        rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1848
1849        /* If MAC address zero, no need to set the AV bit */
1850        if (rar_low || rar_high)
1851                rar_high |= E1000_RAH_AV;
1852
1853        if (index == 0) {
1854                ew32(RAL(index), rar_low);
1855                e1e_flush();
1856                ew32(RAH(index), rar_high);
1857                e1e_flush();
1858                return 0;
1859        }
1860
1861        /* RAR[1-6] are owned by manageability.  Skip those and program the
1862         * next address into the SHRA register array.
1863         */
1864        if (index < (u32)(hw->mac.rar_entry_count)) {
1865                s32 ret_val;
1866
1867                ret_val = e1000_acquire_swflag_ich8lan(hw);
1868                if (ret_val)
1869                        goto out;
1870
1871                ew32(SHRAL(index - 1), rar_low);
1872                e1e_flush();
1873                ew32(SHRAH(index - 1), rar_high);
1874                e1e_flush();
1875
1876                e1000_release_swflag_ich8lan(hw);
1877
1878                /* verify the register updates */
1879                if ((er32(SHRAL(index - 1)) == rar_low) &&
1880                    (er32(SHRAH(index - 1)) == rar_high))
1881                        return 0;
1882
1883                e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1884                      (index - 1), er32(FWSM));
1885        }
1886
1887out:
1888        e_dbg("Failed to write receive address at index %d\n", index);
1889        return -E1000_ERR_CONFIG;
1890}
1891
1892/**
1893 *  e1000_rar_get_count_pch_lpt - Get the number of available SHRA
1894 *  @hw: pointer to the HW structure
1895 *
1896 *  Get the number of available receive registers that the Host can
1897 *  program. SHRA[0-10] are the shared receive address registers
1898 *  that are shared between the Host and manageability engine (ME).
1899 *  ME can reserve any number of addresses and the host needs to be
1900 *  able to tell how many available registers it has access to.
1901 **/
1902static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
1903{
1904        u32 wlock_mac;
1905        u32 num_entries;
1906
1907        wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1908        wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1909
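        /* For example, wlock_mac == 3 means the host may use RAR[0] plus
         * SHRA[0..2] (four entries), with the remaining SHRA registers
         * reserved by ME.
         */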
1910        switch (wlock_mac) {
1911        case 0:
1912                /* All SHRA[0..10] and RAR[0] available */
1913                num_entries = hw->mac.rar_entry_count;
1914                break;
1915        case 1:
1916                /* Only RAR[0] available */
1917                num_entries = 1;
1918                break;
1919        default:
1920                /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
1921                num_entries = wlock_mac + 1;
1922                break;
1923        }
1924
1925        return num_entries;
1926}
1927
1928/**
1929 *  e1000_rar_set_pch_lpt - Set receive address registers
1930 *  @hw: pointer to the HW structure
1931 *  @addr: pointer to the receive address
1932 *  @index: receive address array register
1933 *
1934 *  Sets the receive address register array at index to the address passed
1935 *  in by addr. For LPT, RAR[0] is the base address register that is to
1936 *  contain the MAC address. SHRA[0-10] are the shared receive address
1937 *  registers that are shared between the Host and manageability engine (ME).
1938 **/
1939static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1940{
1941        u32 rar_low, rar_high;
1942        u32 wlock_mac;
1943
1944        /* HW expects these in little endian so we reverse the byte order
1945         * from network order (big endian) to little endian
1946         */
1947        rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
1948                   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1949
1950        rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1951
1952        /* If MAC address zero, no need to set the AV bit */
1953        if (rar_low || rar_high)
1954                rar_high |= E1000_RAH_AV;
1955
1956        if (index == 0) {
1957                ew32(RAL(index), rar_low);
1958                e1e_flush();
1959                ew32(RAH(index), rar_high);
1960                e1e_flush();
1961                return 0;
1962        }
1963
1964        /* The manageability engine (ME) can lock certain SHRAR registers that
1965         * it is using - those registers are unavailable for use.
1966         */
1967        if (index < hw->mac.rar_entry_count) {
1968                wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1969                wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1970
1971                /* Check if all SHRAR registers are locked */
1972                if (wlock_mac == 1)
1973                        goto out;
1974
1975                if ((wlock_mac == 0) || (index <= wlock_mac)) {
1976                        s32 ret_val;
1977
1978                        ret_val = e1000_acquire_swflag_ich8lan(hw);
1979
1980                        if (ret_val)
1981                                goto out;
1982
1983                        ew32(SHRAL_PCH_LPT(index - 1), rar_low);
1984                        e1e_flush();
1985                        ew32(SHRAH_PCH_LPT(index - 1), rar_high);
1986                        e1e_flush();
1987
1988                        e1000_release_swflag_ich8lan(hw);
1989
1990                        /* verify the register updates */
1991                        if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1992                            (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
1993                                return 0;
1994                }
1995        }
1996
1997out:
1998        e_dbg("Failed to write receive address at index %d\n", index);
1999        return -E1000_ERR_CONFIG;
2000}
2001
2002/**
2003 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2004 *  @hw: pointer to the HW structure
2005 *
2006 *  Checks if firmware is blocking the reset of the PHY.
2007 *  This is a function pointer entry point only called by
2008 *  reset routines.
2009 **/
2010static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2011{
2012        bool blocked = false;
2013        int i = 0;
2014
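        /* Poll FWSM.RSPCIPHY for up to 30 iterations of 10-20ms each
         * (roughly 300-600ms) before reporting that the PHY reset is blocked.
         */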
2015        while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
2016               (i++ < 30))
2017                usleep_range(10000, 20000);
2018        return blocked ? E1000_BLK_PHY_RESET : 0;
2019}
2020
2021/**
2022 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2023 *  @hw: pointer to the HW structure
2024 *
2025 *  Assumes semaphore already acquired.
2026 *
2027 **/
2028static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2029{
2030        u16 phy_data;
2031        u32 strap = er32(STRAP);
2032        u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2033            E1000_STRAP_SMT_FREQ_SHIFT;
2034        s32 ret_val;
2035
2036        strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2037
2038        ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2039        if (ret_val)
2040                return ret_val;
2041
2042        phy_data &= ~HV_SMB_ADDR_MASK;
2043        phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2044        phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2045
2046        if (hw->phy.type == e1000_phy_i217) {
2047                /* Restore SMBus frequency */
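                /* A strap frequency value of 0 is treated as unsupported; the
                 * post-decrement in the test below maps a non-zero value N to
                 * N - 1, whose low two bits are then split across the low and
                 * high frequency bits of HV_SMB_ADDR.
                 */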
2048                if (freq--) {
2049                        phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2050                        phy_data |= (freq & BIT(0)) <<
2051                            HV_SMB_ADDR_FREQ_LOW_SHIFT;
2052                        phy_data |= (freq & BIT(1)) <<
2053                            (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2054                } else {
2055                        e_dbg("Unsupported SMB frequency in PHY\n");
2056                }
2057        }
2058
2059        return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2060}
2061
2062/**
2063 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2064 *  @hw:   pointer to the HW structure
2065 *
2066 *  SW should configure the LCD from the NVM extended configuration region
2067 *  as a workaround for certain parts.
2068 **/
2069static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2070{
2071        struct e1000_phy_info *phy = &hw->phy;
2072        u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2073        s32 ret_val = 0;
2074        u16 word_addr, reg_data, reg_addr, phy_page = 0;
2075
2076        /* Initialize the PHY from the NVM on ICH platforms.  This
2077         * is needed due to an issue where the NVM configuration is
2078         * not properly autoloaded after power transitions.
2079         * Therefore, after each PHY reset, we will load the
2080         * configuration data out of the NVM manually.
2081         */
2082        switch (hw->mac.type) {
2083        case e1000_ich8lan:
2084                if (phy->type != e1000_phy_igp_3)
2085                        return ret_val;
2086
2087                if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
2088                    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
2089                        sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2090                        break;
2091                }
2092                /* Fall-thru */
2093        case e1000_pchlan:
2094        case e1000_pch2lan:
2095        case e1000_pch_lpt:
2096        case e1000_pch_spt:
2097        case e1000_pch_cnp:
2098                sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2099                break;
2100        default:
2101                return ret_val;
2102        }
2103
2104        ret_val = hw->phy.ops.acquire(hw);
2105        if (ret_val)
2106                return ret_val;
2107
2108        data = er32(FEXTNVM);
2109        if (!(data & sw_cfg_mask))
2110                goto release;
2111
2112        /* Make sure HW does not configure LCD from PHY
2113         * extended configuration before SW configuration
2114         */
2115        data = er32(EXTCNF_CTRL);
2116        if ((hw->mac.type < e1000_pch2lan) &&
2117            (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2118                goto release;
2119
2120        cnf_size = er32(EXTCNF_SIZE);
2121        cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2122        cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2123        if (!cnf_size)
2124                goto release;
2125
2126        cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2127        cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2128
2129        if (((hw->mac.type == e1000_pchlan) &&
2130             !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2131            (hw->mac.type > e1000_pchlan)) {
2132                /* HW configures the SMBus address and LEDs when the
2133                 * OEM and LCD Write Enable bits are set in the NVM.
2134                 * When both NVM bits are cleared, SW will configure
2135                 * them instead.
2136                 */
2137                ret_val = e1000_write_smbus_addr(hw);
2138                if (ret_val)
2139                        goto release;
2140
2141                data = er32(LEDCTL);
2142                ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2143                                                        (u16)data);
2144                if (ret_val)
2145                        goto release;
2146        }
2147
2148        /* Configure LCD from extended configuration region. */
2149
2150        /* cnf_base_addr is in DWORD */
2151        word_addr = (u16)(cnf_base_addr << 1);
2152
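        /* Each extended configuration entry is a pair of NVM words: the
         * register data followed by the register address.  Page-select
         * writes are cached in phy_page and OR'd into later addresses
         * instead of being written directly.
         */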
2153        for (i = 0; i < cnf_size; i++) {
2154                ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
2155                if (ret_val)
2156                        goto release;
2157
2158                ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
2159                                         1, &reg_addr);
2160                if (ret_val)
2161                        goto release;
2162
2163                /* Save off the PHY page for future writes. */
2164                if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2165                        phy_page = reg_data;
2166                        continue;
2167                }
2168
2169                reg_addr &= PHY_REG_MASK;
2170                reg_addr |= phy_page;
2171
2172                ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
2173                if (ret_val)
2174                        goto release;
2175        }
2176
2177release:
2178        hw->phy.ops.release(hw);
2179        return ret_val;
2180}
2181
2182/**
2183 *  e1000_k1_gig_workaround_hv - K1 Si workaround
2184 *  @hw:   pointer to the HW structure
2185 *  @link: link up bool flag
2186 *
2187 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2188 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
2189 *  If link is down, the function will restore the default K1 setting located
2190 *  in the NVM.
2191 **/
2192static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2193{
2194        s32 ret_val = 0;
2195        u16 status_reg = 0;
2196        bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2197
2198        if (hw->mac.type != e1000_pchlan)
2199                return 0;
2200
2201        /* Wrap the whole flow with the sw flag */
2202        ret_val = hw->phy.ops.acquire(hw);
2203        if (ret_val)
2204                return ret_val;
2205
2206        /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2207        if (link) {
2208                if (hw->phy.type == e1000_phy_82578) {
2209                        ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
2210                                                  &status_reg);
2211                        if (ret_val)
2212                                goto release;
2213
2214                        status_reg &= (BM_CS_STATUS_LINK_UP |
2215                                       BM_CS_STATUS_RESOLVED |
2216                                       BM_CS_STATUS_SPEED_MASK);
2217
2218                        if (status_reg == (BM_CS_STATUS_LINK_UP |
2219                                           BM_CS_STATUS_RESOLVED |
2220                                           BM_CS_STATUS_SPEED_1000))
2221                                k1_enable = false;
2222                }
2223
2224                if (hw->phy.type == e1000_phy_82577) {
2225                        ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
2226                        if (ret_val)
2227                                goto release;
2228
2229                        status_reg &= (HV_M_STATUS_LINK_UP |
2230                                       HV_M_STATUS_AUTONEG_COMPLETE |
2231                                       HV_M_STATUS_SPEED_MASK);
2232
2233                        if (status_reg == (HV_M_STATUS_LINK_UP |
2234                                           HV_M_STATUS_AUTONEG_COMPLETE |
2235                                           HV_M_STATUS_SPEED_1000))
2236                                k1_enable = false;
2237                }
2238
2239                /* Link stall fix for link up */
2240                ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
2241                if (ret_val)
2242                        goto release;
2243
2244        } else {
2245                /* Link stall fix for link down */
2246                ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
2247                if (ret_val)
2248                        goto release;
2249        }
2250
2251        ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2252
2253release:
2254        hw->phy.ops.release(hw);
2255
2256        return ret_val;
2257}
2258
2259/**
2260 *  e1000_configure_k1_ich8lan - Configure K1 power state
2261 *  @hw: pointer to the HW structure
2262 *  @k1_enable: K1 state to configure
2263 *
2264 *  Configure the K1 power state based on the provided parameter.
2265 *  Assumes semaphore already acquired.
2266 *
2267 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2268 **/
2269s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2270{
2271        s32 ret_val;
2272        u32 ctrl_reg = 0;
2273        u32 ctrl_ext = 0;
2274        u32 reg = 0;
2275        u16 kmrn_reg = 0;
2276
2277        ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2278                                              &kmrn_reg);
2279        if (ret_val)
2280                return ret_val;
2281
2282        if (k1_enable)
2283                kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2284        else
2285                kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2286
2287        ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2288                                               kmrn_reg);
2289        if (ret_val)
2290                return ret_val;
2291
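        /* Briefly force the MAC speed selection (FRCSPD with the speed bits
         * cleared) and enable speed bypass, then restore the original CTRL
         * and CTRL_EXT values; presumably this lets the hardware latch the
         * new K1 setting.
         */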
2292        usleep_range(20, 40);
2293        ctrl_ext = er32(CTRL_EXT);
2294        ctrl_reg = er32(CTRL);
2295
2296        reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2297        reg |= E1000_CTRL_FRCSPD;
2298        ew32(CTRL, reg);
2299
2300        ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2301        e1e_flush();
2302        usleep_range(20, 40);
2303        ew32(CTRL, ctrl_reg);
2304        ew32(CTRL_EXT, ctrl_ext);
2305        e1e_flush();
2306        usleep_range(20, 40);
2307
2308        return 0;
2309}
2310
2311/**
2312 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2313 *  @hw:       pointer to the HW structure
2314 *  @d0_state: true when the device is entering D0, false when entering D3
2315 *
2316 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2317 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2318 *  in NVM determine whether HW should configure LPLU and Gbe Disable.
2319 **/
2320static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2321{
2322        s32 ret_val = 0;
2323        u32 mac_reg;
2324        u16 oem_reg;
2325
2326        if (hw->mac.type < e1000_pchlan)
2327                return ret_val;
2328
2329        ret_val = hw->phy.ops.acquire(hw);
2330        if (ret_val)
2331                return ret_val;
2332
2333        if (hw->mac.type == e1000_pchlan) {
2334                mac_reg = er32(EXTCNF_CTRL);
2335                if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2336                        goto release;
2337        }
2338
2339        mac_reg = er32(FEXTNVM);
2340        if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2341                goto release;
2342
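        /* Mirror the MAC's PHY_CTRL Gig-disable and LPLU settings into the
         * PHY's OEM bits, using the D0 or non-D0 variants of the PHY_CTRL
         * bits depending on d0_state.
         */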
2343        mac_reg = er32(PHY_CTRL);
2344
2345        ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
2346        if (ret_val)
2347                goto release;
2348
2349        oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2350
2351        if (d0_state) {
2352                if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2353                        oem_reg |= HV_OEM_BITS_GBE_DIS;
2354
2355                if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2356                        oem_reg |= HV_OEM_BITS_LPLU;
2357        } else {
2358                if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2359                               E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2360                        oem_reg |= HV_OEM_BITS_GBE_DIS;
2361
2362                if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2363                               E1000_PHY_CTRL_NOND0A_LPLU))
2364                        oem_reg |= HV_OEM_BITS_LPLU;
2365        }
2366
2367        /* Set Restart auto-neg to activate the bits */
2368        if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2369            !hw->phy.ops.check_reset_block(hw))
2370                oem_reg |= HV_OEM_BITS_RESTART_AN;
2371
2372        ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
2373
2374release:
2375        hw->phy.ops.release(hw);
2376
2377        return ret_val;
2378}
2379
2380/**
2381 *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2382 *  @hw:   pointer to the HW structure
2383 **/
2384static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2385{
2386        s32 ret_val;
2387        u16 data;
2388
2389        ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
2390        if (ret_val)
2391                return ret_val;
2392
2393        data |= HV_KMRN_MDIO_SLOW;
2394
2395        ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
2396
2397        return ret_val;
2398}
2399
2400/**
2401 *  e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2402 *  done after every PHY reset.
 *  @hw: pointer to the HW structure
2403 **/
2404static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2405{
2406        s32 ret_val = 0;
2407        u16 phy_data;
2408
2409        if (hw->mac.type != e1000_pchlan)
2410                return 0;
2411
2412        /* Set MDIO slow mode before any other MDIO access */
2413        if (hw->phy.type == e1000_phy_82577) {
2414                ret_val = e1000_set_mdio_slow_mode_hv(hw);
2415                if (ret_val)
2416                        return ret_val;
2417        }
2418
2419        if (((hw->phy.type == e1000_phy_82577) &&
2420             ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2421            ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2422                /* Disable generation of early preamble */
2423                ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
2424                if (ret_val)
2425                        return ret_val;
2426
2427                /* Preamble tuning for SSC */
2428                ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
2429                if (ret_val)
2430                        return ret_val;
2431        }
2432
2433        if (hw->phy.type == e1000_phy_82578) {
2434                /* Return registers to default by doing a soft reset then
2435                 * writing 0x3140 to the control register.
2436                 */
2437                if (hw->phy.revision < 2) {
2438                        e1000e_phy_sw_reset(hw);
2439                        ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
2440                        if (ret_val)
2441                                return ret_val;
2442                }
2443        }
2444
2445        /* Select page 0 */
2446        ret_val = hw->phy.ops.acquire(hw);
2447        if (ret_val)
2448                return ret_val;
2449
2450        hw->phy.addr = 1;
2451        ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2452        hw->phy.ops.release(hw);
2453        if (ret_val)
2454                return ret_val;
2455
2456        /* Configure the K1 Si workaround during PHY reset assuming there is
2457         * link, so that K1 is disabled if the link is at 1Gbps.
2458         */
2459        ret_val = e1000_k1_gig_workaround_hv(hw, true);
2460        if (ret_val)
2461                return ret_val;
2462
2463        /* Workaround for link disconnects on a busy hub in half duplex */
2464        ret_val = hw->phy.ops.acquire(hw);
2465        if (ret_val)
2466                return ret_val;
2467        ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2468        if (ret_val)
2469                goto release;
2470        ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
2471        if (ret_val)
2472                goto release;
2473
2474        /* set MSE higher to enable link to stay up when noise is high */
2475        ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2476release:
2477        hw->phy.ops.release(hw);
2478
2479        return ret_val;
2480}
2481
2482/**
2483 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2484 *  @hw:   pointer to the HW structure
2485 **/
2486void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2487{
2488        u32 mac_reg;
2489        u16 i, phy_reg = 0;
2490        s32 ret_val;
2491
2492        ret_val = hw->phy.ops.acquire(hw);
2493        if (ret_val)
2494                return;
2495        ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2496        if (ret_val)
2497                goto release;
2498
2499        /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2500        for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2501                mac_reg = er32(RAL(i));
2502                hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2503                                           (u16)(mac_reg & 0xFFFF));
2504                hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2505                                           (u16)((mac_reg >> 16) & 0xFFFF));
2506
2507                mac_reg = er32(RAH(i));
2508                hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2509                                           (u16)(mac_reg & 0xFFFF));
2510                hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2511                                           (u16)((mac_reg & E1000_RAH_AV)
2512                                                 >> 16));
2513        }
2514
2515        e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2516
2517release:
2518        hw->phy.ops.release(hw);
2519}
2520
2521/**
2522 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2523 *  with 82579 PHY
2524 *  @hw: pointer to the HW structure
2525 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2526 **/
2527s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2528{
2529        s32 ret_val = 0;
2530        u16 phy_reg, data;
2531        u32 mac_reg;
2532        u16 i;
2533
2534        if (hw->mac.type < e1000_pch2lan)
2535                return 0;
2536
2537        /* disable Rx path while enabling/disabling workaround */
2538        e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
2539        ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14));
2540        if (ret_val)
2541                return ret_val;
2542
2543        if (enable) {
2544                /* Write Rx addresses (rar_entry_count for RAL/H, and
2545                 * SHRAL/H) and initial CRC values to the MAC
2546                 */
2547                for (i = 0; i < hw->mac.rar_entry_count; i++) {
2548                        u8 mac_addr[ETH_ALEN] = { 0 };
2549                        u32 addr_high, addr_low;
2550
2551                        addr_high = er32(RAH(i));
2552                        if (!(addr_high & E1000_RAH_AV))
2553                                continue;
2554                        addr_low = er32(RAL(i));
2555                        mac_addr[0] = (addr_low & 0xFF);
2556                        mac_addr[1] = ((addr_low >> 8) & 0xFF);
2557                        mac_addr[2] = ((addr_low >> 16) & 0xFF);
2558                        mac_addr[3] = ((addr_low >> 24) & 0xFF);
2559                        mac_addr[4] = (addr_high & 0xFF);
2560                        mac_addr[5] = ((addr_high >> 8) & 0xFF);
2561
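                        /* The inverted little-endian CRC of each valid
                         * address is the "initial CRC value" mentioned above.
                         */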
2562                        ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
2563                }
2564
2565                /* Write Rx addresses to the PHY */
2566                e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2567
2568                /* Enable jumbo frame workaround in the MAC */
2569                mac_reg = er32(FFLT_DBG);
2570                mac_reg &= ~BIT(14);
2571                mac_reg |= (7 << 15);
2572                ew32(FFLT_DBG, mac_reg);
2573
2574                mac_reg = er32(RCTL);
2575                mac_reg |= E1000_RCTL_SECRC;
2576                ew32(RCTL, mac_reg);
2577
2578                ret_val = e1000e_read_kmrn_reg(hw,
2579                                               E1000_KMRNCTRLSTA_CTRL_OFFSET,
2580                                               &data);
2581                if (ret_val)
2582                        return ret_val;
2583                ret_val = e1000e_write_kmrn_reg(hw,
2584                                                E1000_KMRNCTRLSTA_CTRL_OFFSET,
2585                                                data | BIT(0));
2586                if (ret_val)
2587                        return ret_val;
2588                ret_val = e1000e_read_kmrn_reg(hw,
2589                                               E1000_KMRNCTRLSTA_HD_CTRL,
2590                                               &data);
2591                if (ret_val)
2592                        return ret_val;
2593                data &= ~(0xF << 8);
2594                data |= (0xB << 8);
2595                ret_val = e1000e_write_kmrn_reg(hw,
2596                                                E1000_KMRNCTRLSTA_HD_CTRL,
2597                                                data);
2598                if (ret_val)
2599                        return ret_val;
2600
2601                /* Enable jumbo frame workaround in the PHY */
2602                e1e_rphy(hw, PHY_REG(769, 23), &data);
2603                data &= ~(0x7F << 5);
2604                data |= (0x37 << 5);
2605                ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
2606                if (ret_val)
2607                        return ret_val;
2608                e1e_rphy(hw, PHY_REG(769, 16), &data);
2609                data &= ~BIT(13);
2610                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
2611                if (ret_val)
2612                        return ret_val;
2613                e1e_rphy(hw, PHY_REG(776, 20), &data);
2614                data &= ~(0x3FF << 2);
2615                data |= (E1000_TX_PTR_GAP << 2);
2616                ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
2617                if (ret_val)
2618                        return ret_val;
2619                ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
2620                if (ret_val)
2621                        return ret_val;
2622                e1e_rphy(hw, HV_PM_CTRL, &data);
2623                ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10));
2624                if (ret_val)
2625                        return ret_val;
2626        } else {
2627                /* Write MAC register values back to h/w defaults */
2628                mac_reg = er32(FFLT_DBG);
2629                mac_reg &= ~(0xF << 14);
2630                ew32(FFLT_DBG, mac_reg);
2631
2632                mac_reg = er32(RCTL);
2633                mac_reg &= ~E1000_RCTL_SECRC;
2634                ew32(RCTL, mac_reg);
2635
2636                ret_val = e1000e_read_kmrn_reg(hw,
2637                                               E1000_KMRNCTRLSTA_CTRL_OFFSET,
2638                                               &data);
2639                if (ret_val)
2640                        return ret_val;
2641                ret_val = e1000e_write_kmrn_reg(hw,
2642                                                E1000_KMRNCTRLSTA_CTRL_OFFSET,
2643                                                data & ~BIT(0));
2644                if (ret_val)
2645                        return ret_val;
2646                ret_val = e1000e_read_kmrn_reg(hw,
2647                                               E1000_KMRNCTRLSTA_HD_CTRL,
2648                                               &data);
2649                if (ret_val)
2650                        return ret_val;
2651                data &= ~(0xF << 8);
2652                data |= (0xB << 8);
2653                ret_val = e1000e_write_kmrn_reg(hw,
2654                                                E1000_KMRNCTRLSTA_HD_CTRL,
2655                                                data);
2656                if (ret_val)
2657                        return ret_val;
2658
2659                /* Write PHY register values back to h/w defaults */
2660                e1e_rphy(hw, PHY_REG(769, 23), &data);
2661                data &= ~(0x7F << 5);
2662                ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
2663                if (ret_val)
2664                        return ret_val;
2665                e1e_rphy(hw, PHY_REG(769, 16), &data);
2666                data |= BIT(13);
2667                ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
2668                if (ret_val)
2669                        return ret_val;
2670                e1e_rphy(hw, PHY_REG(776, 20), &data);
2671                data &= ~(0x3FF << 2);
2672                data |= (0x8 << 2);
2673                ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
2674                if (ret_val)
2675                        return ret_val;
2676                ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
2677                if (ret_val)
2678                        return ret_val;
2679                e1e_rphy(hw, HV_PM_CTRL, &data);
2680                ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10));
2681                if (ret_val)
2682                        return ret_val;
2683        }
2684
2685        /* re-enable Rx path after enabling/disabling workaround */
2686        return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14));
2687}
2688
2689/**
2690 *  e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2691 *  done after every PHY reset.
 *  @hw: pointer to the HW structure
2692 **/
2693static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2694{
2695        s32 ret_val = 0;
2696
2697        if (hw->mac.type != e1000_pch2lan)
2698                return 0;
2699
2700        /* Set MDIO slow mode before any other MDIO access */
2701        ret_val = e1000_set_mdio_slow_mode_hv(hw);
2702        if (ret_val)
2703                return ret_val;
2704
2705        ret_val = hw->phy.ops.acquire(hw);
2706        if (ret_val)
2707                return ret_val;
2708        /* set MSE higher to enable link to stay up when noise is high */
2709        ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2710        if (ret_val)
2711                goto release;
2712        /* drop link after 5 times MSE threshold was reached */
2713        ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2714release:
2715        hw->phy.ops.release(hw);
2716
2717        return ret_val;
2718}
2719
2720/**
2721 *  e1000_k1_workaround_lv - K1 Si workaround
2722 *  @hw:   pointer to the HW structure
2723 *
2724 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
2725 *  Disable K1 in 1000Mbps and 100Mbps.
2726 **/
2727static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2728{
2729        s32 ret_val = 0;
2730        u16 status_reg = 0;
2731
2732        if (hw->mac.type != e1000_pch2lan)
2733                return 0;
2734
2735        /* Set K1 beacon duration based on 10Mbps speed */
2736        ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
2737        if (ret_val)
2738                return ret_val;
2739
2740        if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2741            == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2742                if (status_reg &
2743                    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2744                        u16 pm_phy_reg;
2745
2746                        /* LV 1G/100 packet drop issue workaround */
2747                        ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2748                        if (ret_val)
2749                                return ret_val;
2750                        pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2751                        ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2752                        if (ret_val)
2753                                return ret_val;
2754                } else {
2755                        u32 mac_reg;
2756
2757                        mac_reg = er32(FEXTNVM4);
2758                        mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2759                        mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2760                        ew32(FEXTNVM4, mac_reg);
2761                }
2762        }
2763
2764        return ret_val;
2765}
2766
2767/**
2768 *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2769 *  @hw:   pointer to the HW structure
2770 *  @gate: boolean set to true to gate, false to ungate
2771 *
2772 *  Gate/ungate the automatic PHY configuration via hardware; perform
2773 *  the configuration via software instead.
2774 **/
2775static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2776{
2777        u32 extcnf_ctrl;
2778
2779        if (hw->mac.type < e1000_pch2lan)
2780                return;
2781
2782        extcnf_ctrl = er32(EXTCNF_CTRL);
2783
2784        if (gate)
2785                extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2786        else
2787                extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2788
2789        ew32(EXTCNF_CTRL, extcnf_ctrl);
2790}
2791
2792/**
2793 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2794 *  @hw: pointer to the HW structure
2795 *
2796 *  Check the appropriate indication the MAC has finished configuring the
2797 *  PHY after a software reset.
2798 **/
2799static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2800{
2801        u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2802
2803        /* Wait for basic configuration to complete before proceeding */
2804        do {
2805                data = er32(STATUS);
2806                data &= E1000_STATUS_LAN_INIT_DONE;
2807                usleep_range(100, 200);
2808        } while ((!data) && --loop);
2809
2810        /* If basic configuration is incomplete before the above loop
2811         * count reaches 0, loading the configuration from NVM will
2812         * leave the PHY in a bad state possibly resulting in no link.
2813         */
2814        if (loop == 0)
2815                e_dbg("LAN_INIT_DONE not set, increase timeout\n");
2816
2817        /* Clear the Init Done bit for the next init event */
2818        data = er32(STATUS);
2819        data &= ~E1000_STATUS_LAN_INIT_DONE;
2820        ew32(STATUS, data);
2821}
2822
2823/**
2824 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2825 *  @hw: pointer to the HW structure
2826 **/
2827static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
2828{
2829        s32 ret_val = 0;
2830        u16 reg;
2831
2832        if (hw->phy.ops.check_reset_block(hw))
2833                return 0;
2834
2835        /* Allow time for h/w to get to quiescent state after reset */
2836        usleep_range(10000, 20000);
2837
2838        /* Perform any necessary post-reset workarounds */
2839        switch (hw->mac.type) {
2840        case e1000_pchlan:
2841                ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2842                if (ret_val)
2843                        return ret_val;
2844                break;
2845        case e1000_pch2lan:
2846                ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
2847                if (ret_val)
2848                        return ret_val;
2849                break;
2850        default:
2851                break;
2852        }
2853
2854        /* Clear the host wakeup bit after lcd reset */
2855        if (hw->mac.type >= e1000_pchlan) {
2856                e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
2857                reg &= ~BM_WUC_HOST_WU_BIT;
2858                e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
2859        }
2860
2861        /* Configure the LCD with the extended configuration region in NVM */
2862        ret_val = e1000_sw_lcd_config_ich8lan(hw);
2863        if (ret_val)
2864                return ret_val;
2865
2866        /* Configure the LCD with the OEM bits in NVM */
2867        ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2868
2869        if (hw->mac.type == e1000_pch2lan) {
2870                /* Ungate automatic PHY configuration on non-managed 82579 */
2871                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
2872                        usleep_range(10000, 20000);
2873                        e1000_gate_hw_phy_config_ich8lan(hw, false);
2874                }
2875
2876                /* Set EEE LPI Update Timer to 200usec */
2877                ret_val = hw->phy.ops.acquire(hw);
2878                if (ret_val)
2879                        return ret_val;
2880                ret_val = e1000_write_emi_reg_locked(hw,
2881                                                     I82579_LPI_UPDATE_TIMER,
2882                                                     0x1387);
2883                hw->phy.ops.release(hw);
2884        }
2885
2886        return ret_val;
2887}
2888
2889/**
2890 *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2891 *  @hw: pointer to the HW structure
2892 *
2893 *  Resets the PHY
2894 *  This is a function pointer entry point called by drivers
2895 *  or other shared routines.
2896 **/
2897static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2898{
2899        s32 ret_val = 0;
2900
2901        /* Gate automatic PHY configuration by hardware on non-managed 82579 */
2902        if ((hw->mac.type == e1000_pch2lan) &&
2903            !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
2904                e1000_gate_hw_phy_config_ich8lan(hw, true);
2905
2906        ret_val = e1000e_phy_hw_reset_generic(hw);
2907        if (ret_val)
2908                return ret_val;
2909
2910        return e1000_post_phy_reset_ich8lan(hw);
2911}
2912
2913/**
2914 *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2915 *  @hw: pointer to the HW structure
2916 *  @active: true to enable LPLU, false to disable
2917 *
2918 *  Sets the LPLU state according to the active flag.  For PCH, if the
2919 *  OEM write bits are disabled in the NVM, writing the LPLU bits in the
2920 *  MAC will not set the PHY speed.  This function manually sets the LPLU
2921 *  bit and restarts auto-negotiation as the hardware would.  D3 and D0
2922 *  LPLU call the same function since they configure the same bit.
2923 **/
2924static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2925{
2926        s32 ret_val;
2927        u16 oem_reg;
2928
2929        ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
2930        if (ret_val)
2931                return ret_val;
2932
2933        if (active)
2934                oem_reg |= HV_OEM_BITS_LPLU;
2935        else
2936                oem_reg &= ~HV_OEM_BITS_LPLU;
2937
2938        if (!hw->phy.ops.check_reset_block(hw))
2939                oem_reg |= HV_OEM_BITS_RESTART_AN;
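            /* Restarting auto-negotiation (only when no reset block is in
             * place) lets the PHY apply the new LPLU setting right away,
             * mirroring what the hardware would do.
             */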
2940
2941        return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
2942}
2943
2944/**
2945 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2946 *  @hw: pointer to the HW structure
2947 *  @active: true to enable LPLU, false to disable
2948 *
2949 *  Sets the LPLU D0 state according to the active flag.  When
2950 *  activating LPLU this function also disables smart speed
2951 *  and vice versa.  LPLU will not be activated unless the
2952 *  device's autonegotiation advertisement is 10, 10/100, or
2953 *  10/100/1000 at all duplexes.
2954 *  This is a function pointer entry point only called by
2955 *  PHY setup routines.
2956 **/
2957static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2958{
2959        struct e1000_phy_info *phy = &hw->phy;
2960        u32 phy_ctrl;
2961        s32 ret_val = 0;
2962        u16 data;
2963
2964        if (phy->type == e1000_phy_ife)
2965                return 0;
2966
2967        phy_ctrl = er32(PHY_CTRL);
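            /* D0 LPLU is driven from the MAC's PHY_CTRL register; SmartSpeed,
             * adjusted below, lives in the IGP PHY port-config register and
             * is kept mutually exclusive with LPLU.
             */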
2968
2969        if (active) {
2970                phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
2971                ew32(PHY_CTRL, phy_ctrl);
2972
2973                if (phy->type != e1000_phy_igp_3)
2974                        return 0;
2975
2976                /* Call gig speed drop workaround on LPLU before accessing
2977                 * any PHY registers
2978                 */
2979                if (hw->mac.type == e1000_ich8lan)
2980                        e1000e_gig_downshift_workaround_ich8lan(hw);
2981
2982                /* When LPLU is enabled, we should disable SmartSpeed */
2983                ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
2984                if (ret_val)
2985                        return ret_val;
2986                data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2987                ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2988                if (ret_val)
2989                        return ret_val;
2990        } else {
2991                phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
2992                ew32(PHY_CTRL, phy_ctrl);
2993
2994                if (phy->type != e1000_phy_igp_3)
2995                        return 0;
2996
2997                /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
2998                 * during Dx states where the power conservation is most
2999                 * important.  During driver activity we should enable
3000                 * SmartSpeed, so performance is maintained.
3001                 */
3002                if (phy->smart_speed == e1000_smart_speed_on) {
3003                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3004                                           &data);
3005                        if (ret_val)
3006                                return ret_val;
3007
3008                        data |= IGP01E1000_PSCFR_SMART_SPEED;
3009                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3010                                           data);
3011                        if (ret_val)
3012                                return ret_val;
3013                } else if (phy->smart_speed == e1000_smart_speed_off) {
3014                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3015                                           &data);
3016                        if (ret_val)
3017                                return ret_val;
3018
3019                        data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3020                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3021                                           data);
3022                        if (ret_val)
3023                                return ret_val;
3024                }
3025        }
3026
3027        return 0;
3028}
3029
3030/**
3031 *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3032 *  @hw: pointer to the HW structure
3033 *  @active: true to enable LPLU, false to disable
3034 *
3035 *  Sets the LPLU D3 state according to the active flag.  When
3036 *  activating LPLU this function also disables smart speed
3037 *  and vice versa.  LPLU will not be activated unless the
3038 *  device's autonegotiation advertisement is 10, 10/100, or
3039 *  10/100/1000 at all duplexes.
3040 *  This is a function pointer entry point only called by
3041 *  PHY setup routines.
3042 **/
3043static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3044{
3045        struct e1000_phy_info *phy = &hw->phy;
3046        u32 phy_ctrl;
3047        s32 ret_val = 0;
3048        u16 data;
3049
3050        phy_ctrl = er32(PHY_CTRL);
3051
3052        if (!active) {
3053                phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3054                ew32(PHY_CTRL, phy_ctrl);
3055
3056                if (phy->type != e1000_phy_igp_3)
3057                        return 0;
3058
3059                /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3060                 * during Dx states where the power conservation is most
3061                 * important.  During driver activity we should enable
3062                 * SmartSpeed, so performance is maintained.
3063                 */
3064                if (phy->smart_speed == e1000_smart_speed_on) {
3065                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3066                                           &data);
3067                        if (ret_val)
3068                                return ret_val;
3069
3070                        data |= IGP01E1000_PSCFR_SMART_SPEED;
3071                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3072                                           data);
3073                        if (ret_val)
3074                                return ret_val;
3075                } else if (phy->smart_speed == e1000_smart_speed_off) {
3076                        ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3077                                           &data);
3078                        if (ret_val)
3079                                return ret_val;
3080
3081                        data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3082                        ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
3083                                           data);
3084                        if (ret_val)
3085                                return ret_val;
3086                }
3087        } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3088                   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3089                   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3090                phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3091                ew32(PHY_CTRL, phy_ctrl);
3092
3093                if (phy->type != e1000_phy_igp_3)
3094                        return 0;
3095
3096                /* Call gig speed drop workaround on LPLU before accessing
3097                 * any PHY registers
3098                 */
3099                if (hw->mac.type == e1000_ich8lan)
3100                        e1000e_gig_downshift_workaround_ich8lan(hw);
3101
3102                /* When LPLU is enabled, we should disable SmartSpeed */
3103                ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
3104                if (ret_val)
3105                        return ret_val;
3106
3107                data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3108                ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
3109        }
3110
3111        return ret_val;
3112}
3113
3114/**
3115 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3116 *  @hw: pointer to the HW structure
3117 *  @bank:  pointer to the variable that returns the active bank
3118 *
3119 *  Reads signature byte from the NVM using the flash access registers.
3120 *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3121 **/
3122static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3123{
3124        u32 eecd;
3125        struct e1000_nvm_info *nvm = &hw->nvm;
3126        u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3127        u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
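            /* Default to byte addressing: act_offset points at the high byte
             * of the signature word (0x13), which carries the valid-signature
             * bits 15:14, and bank1_offset converts flash_bank_size from
             * words to bytes.  The SPT/CNP case below overrides both with
             * word offsets.
             */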
3128        u32 nvm_dword = 0;
3129        u8 sig_byte = 0;
3130        s32 ret_val;
3131
3132        switch (hw->mac.type) {
3133        case e1000_pch_spt:
3134        case e1000_pch_cnp:
3135                bank1_offset = nvm->flash_bank_size;
3136                act_offset = E1000_ICH_NVM_SIG_WORD;
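                    /* SPT/CNP flash is accessed in 32-bit units, so keep both
                     * offsets in words; e1000_read_flash_dword_ich8lan()
                     * converts the word offset to bytes itself.
                     */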
3137
3138                /* set bank to 0 in case flash read fails */
3139                *bank = 0;
3140
3141                /* Check bank 0 */
3142                ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3143                                                         &nvm_dword);
3144                if (ret_val)
3145                        return ret_val;
3146                sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3147                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3148                    E1000_ICH_NVM_SIG_VALUE) {
3149                        *bank = 0;
3150                        return 0;
3151                }
3152
3153                /* Check bank 1 */
3154                ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3155                                                         bank1_offset,
3156                                                         &nvm_dword);
3157                if (ret_val)
3158                        return ret_val;
3159                sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3160                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3161                    E1000_ICH_NVM_SIG_VALUE) {
3162                        *bank = 1;
3163                        return 0;
3164                }
3165
3166                e_dbg("ERROR: No valid NVM bank present\n");
3167                return -E1000_ERR_NVM;
3168        case e1000_ich8lan:
3169        case e1000_ich9lan:
3170                eecd = er32(EECD);
3171                if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3172                    E1000_EECD_SEC1VAL_VALID_MASK) {
3173                        if (eecd & E1000_EECD_SEC1VAL)
3174                                *bank = 1;
3175                        else
3176                                *bank = 0;
3177
3178                        return 0;
3179                }
3180                e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3181                /* fall-thru */
3182        default:
3183                /* set bank to 0 in case flash read fails */
3184                *bank = 0;
3185
3186                /* Check bank 0 */
3187                ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3188                                                        &sig_byte);
3189                if (ret_val)
3190                        return ret_val;
3191                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3192                    E1000_ICH_NVM_SIG_VALUE) {
3193                        *bank = 0;
3194                        return 0;
3195                }
3196
3197                /* Check bank 1 */
3198                ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3199                                                        bank1_offset,
3200                                                        &sig_byte);
3201                if (ret_val)
3202                        return ret_val;
3203                if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3204                    E1000_ICH_NVM_SIG_VALUE) {
3205                        *bank = 1;
3206                        return 0;
3207                }
3208
3209                e_dbg("ERROR: No valid NVM bank present\n");
3210                return -E1000_ERR_NVM;
3211        }
3212}
3213
3214/**
3215 *  e1000_read_nvm_spt - NVM access for SPT
3216 *  @hw: pointer to the HW structure
3217 *  @offset: The offset (in words) of the word(s) to read.
3218 *  @words: Size of data to read in words.
3219 *  @data: pointer to the word(s) to read at offset.
3220 *
3221 *  Reads a word(s) from the NVM
3222 **/
3223static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3224                              u16 *data)
3225{
3226        struct e1000_nvm_info *nvm = &hw->nvm;
3227        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3228        u32 act_offset;
3229        s32 ret_val = 0;
3230        u32 bank = 0;
3231        u32 dword = 0;
3232        u16 offset_to_read;
3233        u16 i;
3234
3235        if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3236            (words == 0)) {
3237                e_dbg("nvm parameter(s) out of bounds\n");
3238                ret_val = -E1000_ERR_NVM;
3239                goto out;
3240        }
3241
3242        nvm->ops.acquire(hw);
3243
3244        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3245        if (ret_val) {
3246                e_dbg("Could not detect valid bank, assuming bank 0\n");
3247                bank = 0;
3248        }
3249
3250        act_offset = (bank) ? nvm->flash_bank_size : 0;
3251        act_offset += offset;
3252
3253        ret_val = 0;
3254
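            /* SPT only supports 32-bit flash accesses, so fetch two words per
             * iteration as one dword.  A trailing odd word is read by
             * fetching the dword-aligned pair and picking the correct half.
             * Words marked modified in the shadow RAM take precedence over
             * the flash contents.
             */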
3255        for (i = 0; i < words; i += 2) {
3256                if (words - i == 1) {
3257                        if (dev_spec->shadow_ram[offset + i].modified) {
3258                                data[i] =
3259                                    dev_spec->shadow_ram[offset + i].value;
3260                        } else {
3261                                offset_to_read = act_offset + i -
3262                                    ((act_offset + i) % 2);
3263                                ret_val =
3264                                  e1000_read_flash_dword_ich8lan(hw,
3265                                                                 offset_to_read,
3266                                                                 &dword);
3267                                if (ret_val)
3268                                        break;
3269                                if ((act_offset + i) % 2 == 0)
3270                                        data[i] = (u16)(dword & 0xFFFF);
3271                                else
3272                                        data[i] = (u16)((dword >> 16) & 0xFFFF);
3273                        }
3274                } else {
3275                        offset_to_read = act_offset + i;
3276                        if (!(dev_spec->shadow_ram[offset + i].modified) ||
3277                            !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3278                                ret_val =
3279                                  e1000_read_flash_dword_ich8lan(hw,
3280                                                                 offset_to_read,
3281                                                                 &dword);
3282                                if (ret_val)
3283                                        break;
3284                        }
3285                        if (dev_spec->shadow_ram[offset + i].modified)
3286                                data[i] =
3287                                    dev_spec->shadow_ram[offset + i].value;
3288                        else
3289                                data[i] = (u16)(dword & 0xFFFF);
3290                        if (dev_spec->shadow_ram[offset + i + 1].modified)
3291                                data[i + 1] =
3292                                    dev_spec->shadow_ram[offset + i + 1].value;
3293                        else
3294                                data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
3295                }
3296        }
3297
3298        nvm->ops.release(hw);
3299
3300out:
3301        if (ret_val)
3302                e_dbg("NVM read error: %d\n", ret_val);
3303
3304        return ret_val;
3305}
3306
3307/**
3308 *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3309 *  @hw: pointer to the HW structure
3310 *  @offset: The offset (in words) of the word(s) to read.
3311 *  @words: Size of data to read in words
3312 *  @data: Pointer to the word(s) to read at offset.
3313 *
3314 *  Reads a word(s) from the NVM using the flash access registers.
3315 **/
3316static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3317                                  u16 *data)
3318{
3319        struct e1000_nvm_info *nvm = &hw->nvm;
3320        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3321        u32 act_offset;
3322        s32 ret_val = 0;
3323        u32 bank = 0;
3324        u16 i, word;
3325
3326        if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3327            (words == 0)) {
3328                e_dbg("nvm parameter(s) out of bounds\n");
3329                ret_val = -E1000_ERR_NVM;
3330                goto out;
3331        }
3332
3333        nvm->ops.acquire(hw);
3334
3335        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3336        if (ret_val) {
3337                e_dbg("Could not detect valid bank, assuming bank 0\n");
3338                bank = 0;
3339        }
3340
3341        act_offset = (bank) ? nvm->flash_bank_size : 0;
3342        act_offset += offset;
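            /* act_offset is a word offset into the currently valid bank;
             * words the shadow RAM marks as modified are returned from the
             * cache instead of being read from flash.
             */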
3343
3344        ret_val = 0;
3345        for (i = 0; i < words; i++) {
3346                if (dev_spec->shadow_ram[offset + i].modified) {
3347                        data[i] = dev_spec->shadow_ram[offset + i].value;
3348                } else {
3349                        ret_val = e1000_read_flash_word_ich8lan(hw,
3350                                                                act_offset + i,
3351                                                                &word);
3352                        if (ret_val)
3353                                break;
3354                        data[i] = word;
3355                }
3356        }
3357
3358        nvm->ops.release(hw);
3359
3360out:
3361        if (ret_val)
3362                e_dbg("NVM read error: %d\n", ret_val);
3363
3364        return ret_val;
3365}
3366
3367/**
3368 *  e1000_flash_cycle_init_ich8lan - Initialize flash
3369 *  @hw: pointer to the HW structure
3370 *
3371 *  This function does initial flash setup so that a new read/write/erase cycle
3372 *  can be started.
3373 **/
3374static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3375{
3376        union ich8_hws_flash_status hsfsts;
3377        s32 ret_val = -E1000_ERR_NVM;
3378
3379        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3380
3381        /* Check if the flash descriptor is valid */
3382        if (!hsfsts.hsf_status.fldesvalid) {
3383                e_dbg("Flash descriptor invalid.  SW Sequencing must be used.\n");
3384                return -E1000_ERR_NVM;
3385        }
3386
3387        /* Clear FCERR and DAEL in hw status by writing 1 */
3388        hsfsts.hsf_status.flcerr = 1;
3389        hsfsts.hsf_status.dael = 1;
3390        if (hw->mac.type >= e1000_pch_spt)
3391                ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3392        else
3393                ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
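            /* From SPT onward the flash status/control registers live in LAN
             * memory space and only 32-bit accesses are supported, so the
             * 16-bit status value is written with a 32-bit access above.
             */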
3394
3395        /* Either we should have a hardware SPI cycle in progress
3396         * bit to check against, in order to start a new cycle or
3397         * FDONE bit should be changed in the hardware so that it
3398         * is 1 after hardware reset, which can then be used as an
3399         * indication whether a cycle is in progress or has been
3400         * completed.
3401         */
3402
3403        if (!hsfsts.hsf_status.flcinprog) {
3404                /* There is no cycle running at present,
3405                 * so we can start a cycle.
3406                 * Begin by setting Flash Cycle Done.
3407                 */
3408                hsfsts.hsf_status.flcdone = 1;
3409                if (hw->mac.type >= e1000_pch_spt)
3410                        ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
3411                else
3412                        ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3413                ret_val = 0;
3414        } else {
3415                s32 i;
3416
3417                /* Otherwise poll for some time so the current
3418                 * cycle has a chance to end before giving up.
3419                 */
3420                for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3421                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3422                        if (!hsfsts.hsf_status.flcinprog) {
3423                                ret_val = 0;
3424                                break;
3425                        }
3426                        udelay(1);
3427                }
3428                if (!ret_val) {
3429                        /* The previous cycle completed while we were
3430                         * polling, so now set the Flash Cycle Done.
3431                         */
3432                        hsfsts.hsf_status.flcdone = 1;
3433                        if (hw->mac.type >= e1000_pch_spt)
3434                                ew32flash(ICH_FLASH_HSFSTS,
3435                                          hsfsts.regval & 0xFFFF);
3436                        else
3437                                ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
3438                } else {
3439                        e_dbg("Flash controller busy, cannot get access\n");
3440                }
3441        }
3442
3443        return ret_val;
3444}
3445
3446/**
3447 *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3448 *  @hw: pointer to the HW structure
3449 *  @timeout: maximum time to wait for completion
3450 *
3451 *  This function starts a flash cycle and waits for its completion.
3452 **/
3453static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3454{
3455        union ich8_hws_flash_ctrl hsflctl;
3456        union ich8_hws_flash_status hsfsts;
3457        u32 i = 0;
3458
3459        /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3460        if (hw->mac.type >= e1000_pch_spt)
3461                hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
3462        else
3463                hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
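            /* On SPT the 16-bit flash control word occupies the upper half of
             * the 32-bit HSFSTS register, hence the shift right on read above
             * and the shift left on the write below.
             */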
3464        hsflctl.hsf_ctrl.flcgo = 1;
3465
3466        if (hw->mac.type >= e1000_pch_spt)
3467                ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
3468        else
3469                ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3470
3471        /* wait till FDONE bit is set to 1 */
3472        do {
3473                hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3474                if (hsfsts.hsf_status.flcdone)
3475                        break;
3476                udelay(1);
3477        } while (i++ < timeout);
3478
3479        if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3480                return 0;
3481
3482        return -E1000_ERR_NVM;
3483}
3484
3485/**
3486 *  e1000_read_flash_dword_ich8lan - Read dword from flash
3487 *  @hw: pointer to the HW structure
3488 *  @offset: offset to data location
3489 *  @data: pointer to the location for storing the data
3490 *
3491 *  Reads the flash dword at offset into data.  Offset is converted
3492 *  to bytes before read.
3493 **/
3494static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3495                                          u32 *data)
3496{
3497        /* Must convert word offset into bytes. */
3498        offset <<= 1;
3499        return e1000_read_flash_data32_ich8lan(hw, offset, data);
3500}
3501
3502/**
3503 *  e1000_read_flash_word_ich8lan - Read word from flash
3504 *  @hw: pointer to the HW structure
3505 *  @offset: offset to data location
3506 *  @data: pointer to the location for storing the data
3507 *
3508 *  Reads the flash word at offset into data.  Offset is converted
3509 *  to bytes before read.
3510 **/
3511static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3512                                         u16 *data)
3513{
3514        /* Must convert offset into bytes. */
3515        offset <<= 1;
3516
3517        return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3518}
3519
3520/**
3521 *  e1000_read_flash_byte_ich8lan - Read byte from flash
3522 *  @hw: pointer to the HW structure
3523 *  @offset: The offset of the byte to read.
3524 *  @data: Pointer to a byte to store the value read.
3525 *
3526 *  Reads a single byte from the NVM using the flash access registers.
3527 **/
3528static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3529                                         u8 *data)
3530{
3531        s32 ret_val;
3532        u16 word = 0;
3533
3534        /* In SPT, only 32-bit access is supported,
3535         * so this function should not be called.
3536         */
3537        if (hw->mac.type >= e1000_pch_spt)
3538                return -E1000_ERR_NVM;
3539        else
3540                ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3541
3542        if (ret_val)
3543                return ret_val;
3544
3545        *data = (u8)word;
3546
3547        return 0;
3548}
3549
3550/**
3551 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3552 *  @hw: pointer to the HW structure
3553 *  @offset: The offset (in bytes) of the byte or word to read.
3554 *  @size: Size of data to read, 1=byte 2=word
3555 *  @data: Pointer to the word to store the value read.
3556 *
3557 *  Reads a byte or word from the NVM using the flash access registers.
3558 **/
3559static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3560                                         u8 size, u16 *data)
3561{
3562        union ich8_hws_flash_status hsfsts;
3563        union ich8_hws_flash_ctrl hsflctl;
3564        u32 flash_linear_addr;
3565        u32 flash_data = 0;
3566        s32 ret_val = -E1000_ERR_NVM;
3567        u8 count = 0;
3568
3569        if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3570                return -E1000_ERR_NVM;
3571
3572        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3573                             hw->nvm.flash_base_addr);
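            /* The linear address is the byte offset within the GbE region
             * plus the region's base address in the flash part.
             */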
3574
3575        do {
3576                udelay(1);
3577                /* Steps */
3578                ret_val = e1000_flash_cycle_init_ich8lan(hw);
3579                if (ret_val)
3580                        break;
3581
3582                hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3583                /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3584                hsflctl.hsf_ctrl.fldbcount = size - 1;
3585                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3586                ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3587
3588                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3589
3590                ret_val =
3591                    e1000_flash_cycle_ich8lan(hw,
3592                                              ICH_FLASH_READ_COMMAND_TIMEOUT);
3593
3594                /* If the cycle succeeded, read the result out of
3595                 * Flash Data0 (least significant byte first).  If
3596                 * FCERR is set, clear it and retry the whole
3597                 * sequence a few more times before giving up.
3598                 */
3599                if (!ret_val) {
3600                        flash_data = er32flash(ICH_FLASH_FDATA0);
3601                        if (size == 1)
3602                                *data = (u8)(flash_data & 0x000000FF);
3603                        else if (size == 2)
3604                                *data = (u16)(flash_data & 0x0000FFFF);
3605                        break;
3606                } else {
3607                        /* If we've gotten here, then things are probably
3608                         * completely hosed, but if the error condition is
3609                         * detected, it won't hurt to give it another try...
3610                         * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3611                         */
3612                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3613                        if (hsfsts.hsf_status.flcerr) {
3614                                /* Repeat for some time before giving up. */
3615                                continue;
3616                        } else if (!hsfsts.hsf_status.flcdone) {
3617                                e_dbg("Timeout error - flash cycle did not complete.\n");
3618                                break;
3619                        }
3620                }
3621        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3622
3623        return ret_val;
3624}
3625
3626/**
3627 *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3628 *  @hw: pointer to the HW structure
3629 *  @offset: The offset (in bytes) of the dword to read.
3630 *  @data: Pointer to the dword to store the value read.
3631 *
3632 *  Reads a dword from the NVM using the flash access registers.
3633 **/
3634
3635static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3636                                           u32 *data)
3637{
3638        union ich8_hws_flash_status hsfsts;
3639        union ich8_hws_flash_ctrl hsflctl;
3640        u32 flash_linear_addr;
3641        s32 ret_val = -E1000_ERR_NVM;
3642        u8 count = 0;
3643
3644        if (offset > ICH_FLASH_LINEAR_ADDR_MASK || hw->mac.type < e1000_pch_spt)
3645                return -E1000_ERR_NVM;
3646        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3647                             hw->nvm.flash_base_addr);
3648
3649        do {
3650                udelay(1);
3651                /* Steps */
3652                ret_val = e1000_flash_cycle_init_ich8lan(hw);
3653                if (ret_val)
3654                        break;
3655                /* In SPT, this register is in LAN memory space, not flash.
3656                 * Therefore, only 32-bit access is supported.
3657                 */
3658                hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
3659
3660                /* fldbcount = sizeof(u32) - 1 selects a 4-byte (dword) access. */
3661                hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3662                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3663                /* In SPT, this register is in LAN memory space, not flash.
3664                 * Therefore, only 32-bit access is supported.
3665                 */
3666                ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
3667                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
3668
3669                ret_val =
3670                   e1000_flash_cycle_ich8lan(hw,
3671                                             ICH_FLASH_READ_COMMAND_TIMEOUT);
3672
3673                /* If the cycle succeeded, read the result out of
3674                 * Flash Data0.  If FCERR is set, clear it and
3675                 * retry the whole sequence a few more times before
3676                 * giving up.
3677                 */
3678                if (!ret_val) {
3679                        *data = er32flash(ICH_FLASH_FDATA0);
3680                        break;
3681                } else {
3682                        /* If we've gotten here, then things are probably
3683                         * completely hosed, but if the error condition is
3684                         * detected, it won't hurt to give it another try...
3685                         * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3686                         */
3687                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3688                        if (hsfsts.hsf_status.flcerr) {
3689                                /* Repeat for some time before giving up. */
3690                                continue;
3691                        } else if (!hsfsts.hsf_status.flcdone) {
3692                                e_dbg("Timeout error - flash cycle did not complete.\n");
3693                                break;
3694                        }
3695                }
3696        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3697
3698        return ret_val;
3699}
3700
3701/**
3702 *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3703 *  @hw: pointer to the HW structure
3704 *  @offset: The offset (in words) of the word(s) to write.
3705 *  @words: Size of data to write in words
3706 *  @data: Pointer to the word(s) to write at offset.
3707 *
3708 *  Writes a byte or word to the NVM using the flash access registers.
3709 **/
3710static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3711                                   u16 *data)
3712{
3713        struct e1000_nvm_info *nvm = &hw->nvm;
3714        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3715        u16 i;
3716
3717        if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3718            (words == 0)) {
3719                e_dbg("nvm parameter(s) out of bounds\n");
3720                return -E1000_ERR_NVM;
3721        }
3722
3723        nvm->ops.acquire(hw);
3724
3725        for (i = 0; i < words; i++) {
3726                dev_spec->shadow_ram[offset + i].modified = true;
3727                dev_spec->shadow_ram[offset + i].value = data[i];
3728        }
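            /* Writes land only in the shadow RAM cache; the modified words
             * are committed to flash later by the update_nvm_checksum path.
             */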
3729
3730        nvm->ops.release(hw);
3731
3732        return 0;
3733}
3734
3735/**
3736 *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
3737 *  @hw: pointer to the HW structure
3738 *
3739 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3740 *  which writes the checksum to the shadow ram.  The changes in the shadow
3741 *  ram are then committed to the EEPROM by processing each bank at a time
3742 *  checking for the modified bit and writing only the pending changes.
3743 *  After a successful commit, the shadow ram is cleared and is ready for
3744 *  future writes.
3745 **/
3746static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
3747{
3748        struct e1000_nvm_info *nvm = &hw->nvm;
3749        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3750        u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3751        s32 ret_val;
3752        u32 dword = 0;
3753
3754        ret_val = e1000e_update_nvm_checksum_generic(hw);
3755        if (ret_val)
3756                goto out;
3757
3758        if (nvm->type != e1000_nvm_flash_sw)
3759                goto out;
3760
3761        nvm->ops.acquire(hw);
3762
3763        /* We're writing to the opposite bank so if we're on bank 1,
3764         * write to bank 0 etc.  We also need to erase the segment that
3765         * is going to be written
3766         */
3767        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3768        if (ret_val) {
3769                e_dbg("Could not detect valid bank, assuming bank 0\n");
3770                bank = 0;
3771        }
3772
3773        if (bank == 0) {
3774                new_bank_offset = nvm->flash_bank_size;
3775                old_bank_offset = 0;
3776                ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3777                if (ret_val)
3778                        goto release;
3779        } else {
3780                old_bank_offset = nvm->flash_bank_size;
3781                new_bank_offset = 0;
3782                ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3783                if (ret_val)
3784                        goto release;
3785        }
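            /* Copy the old bank into the freshly erased bank one dword at a
             * time, substituting any words the shadow RAM marks as modified.
             */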
3786        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) {
3787                /* Determine whether to write the value stored
3788                 * in the other NVM bank or a modified value stored
3789                 * in the shadow RAM
3790                 */
3791                ret_val = e1000_read_flash_dword_ich8lan(hw,
3792                                                         i + old_bank_offset,
3793                                                         &dword);
3794
3795                if (dev_spec->shadow_ram[i].modified) {
3796                        dword &= 0xffff0000;
3797                        dword |= (dev_spec->shadow_ram[i].value & 0xffff);
3798                }
3799                if (dev_spec->shadow_ram[i + 1].modified) {
3800                        dword &= 0x0000ffff;
3801                        dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
3802                                  << 16);
3803                }
3804                if (ret_val)
3805                        break;
3806
3807                /* If the word is 0x13, then make sure the signature bits
3808                 * (15:14) are 11b until the commit has completed.
3809                 * This will allow us to write 10b which indicates the
3810                 * signature is valid.  We want to do this after the write
3811                 * has completed so that we don't mark the segment valid
3812                 * while the write is still in progress
3813                 */
3814                if (i == E1000_ICH_NVM_SIG_WORD - 1)
3815                        dword |= E1000_ICH_NVM_SIG_MASK << 16;
3816
3817                /* Convert offset to bytes. */
3818                act_offset = (i + new_bank_offset) << 1;
3819
3820                usleep_range(100, 200);
3821
3822                /* Write the data to the new bank. Offset in words */
3823                act_offset = i + new_bank_offset;
3824                ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
3825                                                                dword);
3826                if (ret_val)
3827                        break;
3828        }
3829
3830        /* Don't bother writing the segment valid bits if sector
3831         * programming failed.
3832         */
3833        if (ret_val) {
3834                /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
3835                e_dbg("Flash commit failed.\n");
3836                goto release;
3837        }
3838
3839        /* Finally validate the new segment by setting bits 15:14
3840         * to 10b in word 0x13.  This can be done without an
3841         * erase because these bits start out as 11b and we only
3842         * need to clear bit 14.
3843         */
3844        act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3845
3846        /* offset is in words but we read a dword */
3847        --act_offset;
3848        ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3849
3850        if (ret_val)
3851                goto release;
3852
3853        dword &= 0xBFFFFFFF;
3854        ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3855
3856        if (ret_val)
3857                goto release;
3858
3859        /* And invalidate the previously valid segment by setting
3860         * the high byte of its signature word (0x13) to 0.  This
3861         * can be done without an erase because a flash erase sets
3862         * all bits to 1, and 1s can be written to 0s without one.
3863         */
3864        act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3865
3866        /* offset in words but we read dword */
3867        act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
3868        ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3869
3870        if (ret_val)
3871                goto release;
3872
3873        dword &= 0x00FFFFFF;
3874        ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3875
3876        if (ret_val)
3877                goto release;
3878
3879        /* Great!  Everything worked, we can now clear the cached entries. */
3880        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
3881                dev_spec->shadow_ram[i].modified = false;
3882                dev_spec->shadow_ram[i].value = 0xFFFF;
3883        }
3884
3885release:
3886        nvm->ops.release(hw);
3887
3888        /* Reload the EEPROM, or else modifications will not appear
3889         * until after the next adapter reset.
3890         */
3891        if (!ret_val) {
3892                nvm->ops.reload(hw);
3893                usleep_range(10000, 20000);
3894        }
3895
3896out:
3897        if (ret_val)
3898                e_dbg("NVM update error: %d\n", ret_val);
3899
3900        return ret_val;
3901}
3902
3903/**
3904 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
3905 *  @hw: pointer to the HW structure
3906 *
3907 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3908 *  which writes the checksum to the shadow ram.  The changes in the shadow
3909 *  ram are then committed to the EEPROM by processing each bank at a time
3910 *  checking for the modified bit and writing only the pending changes.
3911 *  After a successful commit, the shadow ram is cleared and is ready for
3912 *  future writes.
3913 **/
3914static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
3915{
3916        struct e1000_nvm_info *nvm = &hw->nvm;
3917        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3918        u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3919        s32 ret_val;
3920        u16 data = 0;
3921
3922        ret_val = e1000e_update_nvm_checksum_generic(hw);
3923        if (ret_val)
3924                goto out;
3925
3926        if (nvm->type != e1000_nvm_flash_sw)
3927                goto out;
3928
3929        nvm->ops.acquire(hw);
3930
3931        /* We're writing to the opposite bank so if we're on bank 1,
3932         * write to bank 0 etc.  We also need to erase the segment that
3933         * is going to be written
3934         */
3935        ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3936        if (ret_val) {
3937                e_dbg("Could not detect valid bank, assuming bank 0\n");
3938                bank = 0;
3939        }
3940
3941        if (bank == 0) {
3942                new_bank_offset = nvm->flash_bank_size;
3943                old_bank_offset = 0;
3944                ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3945                if (ret_val)
3946                        goto release;
3947        } else {
3948                old_bank_offset = nvm->flash_bank_size;
3949                new_bank_offset = 0;
3950                ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3951                if (ret_val)
3952                        goto release;
3953        }
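            /* Copy the old bank word by word, preferring modified shadow RAM
             * values, and program each word into the new bank as two
             * byte-sized flash write cycles.
             */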
3954        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
3955                if (dev_spec->shadow_ram[i].modified) {
3956                        data = dev_spec->shadow_ram[i].value;
3957                } else {
3958                        ret_val = e1000_read_flash_word_ich8lan(hw, i +
3959                                                                old_bank_offset,
3960                                                                &data);
3961                        if (ret_val)
3962                                break;
3963                }
3964
3965                /* If the word is 0x13, then make sure the signature bits
3966                 * (15:14) are 11b until the commit has completed.
3967                 * This will allow us to write 10b which indicates the
3968                 * signature is valid.  We want to do this after the write
3969                 * has completed so that we don't mark the segment valid
3970                 * while the write is still in progress
3971                 */
3972                if (i == E1000_ICH_NVM_SIG_WORD)
3973                        data |= E1000_ICH_NVM_SIG_MASK;
3974
3975                /* Convert offset to bytes. */
3976                act_offset = (i + new_bank_offset) << 1;
3977
3978                usleep_range(100, 200);
3979                /* Write the bytes to the new bank. */
3980                ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3981                                                               act_offset,
3982                                                               (u8)data);
3983                if (ret_val)
3984                        break;
3985
3986                usleep_range(100, 200);
3987                ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
3988                                                               act_offset + 1,
3989                                                               (u8)(data >> 8));
3990                if (ret_val)
3991                        break;
3992        }
3993
3994        /* Don't bother writing the segment valid bits if sector
3995         * programming failed.
3996         */
3997        if (ret_val) {
3998                /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
3999                e_dbg("Flash commit failed.\n");
4000                goto release;
4001        }
4002
4003        /* Finally validate the new segment by setting bits 15:14
4004         * to 10b in word 0x13.  This can be done without an
4005         * erase because these bits start out as 11b and we only
4006         * need to clear bit 14.
4007         */
4008        act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4009        ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4010        if (ret_val)
4011                goto release;
4012
4013        data &= 0xBFFF;
4014        ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4015                                                       act_offset * 2 + 1,
4016                                                       (u8)(data >> 8));
4017        if (ret_val)
4018                goto release;
4019
4020        /* And invalidate the previously valid segment by setting
4021         * the high byte of its signature word (0x13) to 0.  This
4022         * can be done without an erase because a flash erase sets
4023         * all bits to 1, and 1s can be written to 0s without one.
4024         */
4025        act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4026        ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4027        if (ret_val)
4028                goto release;
4029
4030        /* Great!  Everything worked, we can now clear the cached entries. */
4031        for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
4032                dev_spec->shadow_ram[i].modified = false;
4033                dev_spec->shadow_ram[i].value = 0xFFFF;
4034        }
4035
4036release:
4037        nvm->ops.release(hw);
4038
4039        /* Reload the EEPROM, or else modifications will not appear
4040         * until after the next adapter reset.
4041         */
4042        if (!ret_val) {
4043                nvm->ops.reload(hw);
4044                usleep_range(10000, 20000);
4045        }
4046
4047out:
4048        if (ret_val)
4049                e_dbg("NVM update error: %d\n", ret_val);
4050
4051        return ret_val;
4052}
4053
4054/**
4055 *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4056 *  @hw: pointer to the HW structure
4057 *
4058 *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4059 *  If the bit is 0, the EEPROM had been modified but the checksum was not
4060 *  calculated, in which case we need to calculate the checksum and set bit 6.
4061 **/
4062static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4063{
4064        s32 ret_val;
4065        u16 data;
4066        u16 word;
4067        u16 valid_csum_mask;
4068
4069        /* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4070         * the checksum needs to be fixed.  This bit is an indication that
4071         * the NVM was prepared by OEM software and did not calculate
4072         * the checksum...a likely scenario.
4073         */
4074        switch (hw->mac.type) {
4075        case e1000_pch_lpt:
4076        case e1000_pch_spt:
4077        case e1000_pch_cnp:
4078                word = NVM_COMPAT;
4079                valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4080                break;
4081        default:
4082                word = NVM_FUTURE_INIT_WORD1;
4083                valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4084                break;
4085        }
4086
4087        ret_val = e1000_read_nvm(hw, word, 1, &data);
4088        if (ret_val)
4089                return ret_val;
4090
4091        if (!(data & valid_csum_mask)) {
4092                data |= valid_csum_mask;
4093                ret_val = e1000_write_nvm(hw, word, 1, &data);
4094                if (ret_val)
4095                        return ret_val;
4096                ret_val = e1000e_update_nvm_checksum(hw);
4097                if (ret_val)
4098                        return ret_val;
4099        }
4100
4101        return e1000e_validate_nvm_checksum_generic(hw);
4102}
4103
4104/**
4105 *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
4106 *  @hw: pointer to the HW structure
4107 *
4108 *  To prevent malicious write/erase of the NVM, set it to be read-only
4109 *  so that the hardware ignores all write/erase cycles of the NVM via
4110 *  the flash control registers.  The shadow-ram copy of the NVM will
4111 *  still be updated, however any updates to this copy will not stick
4112 *  across driver reloads.
4113 **/
4114void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
4115{
4116        struct e1000_nvm_info *nvm = &hw->nvm;
4117        union ich8_flash_protected_range pr0;
4118        union ich8_hws_flash_status hsfsts;
4119        u32 gfpreg;
4120
4121        nvm->ops.acquire(hw);
4122
4123        gfpreg = er32flash(ICH_FLASH_GFPREG);
4124
4125        /* Write-protect GbE Sector of NVM */
4126        pr0.regval = er32flash(ICH_FLASH_PR0);
4127        pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
4128        pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
4129        pr0.range.wpe = true;
4130        ew32flash(ICH_FLASH_PR0, pr0.regval);
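            /* PR0's base and limit come from the low and high halves of
             * GFPREG (masked by FLASH_GFPREG_BASE_MASK); with wpe set, the
             * hardware ignores write/erase cycles to that range.
             */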
4131
4132        /* Lock down a subset of GbE Flash Control Registers, e.g.
4133         * PR0 to prevent the write-protection from being lifted.
4134         * Once FLOCKDN is set, the registers protected by it cannot
4135         * be written until FLOCKDN is cleared by a hardware reset.
4136         */
4137        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4138        hsfsts.hsf_status.flockdn = true;
4139        ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
4140
4141        nvm->ops.release(hw);
4142}
4143
4144/**
4145 *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4146 *  @hw: pointer to the HW structure
4147 *  @offset: The offset (in bytes) of the byte/word to write.
4148 *  @size: Size of data to write, 1=byte 2=word
4149 *  @data: The byte(s) to write to the NVM.
4150 *
4151 *  Writes one/two bytes to the NVM using the flash access registers.
4152 **/
4153static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4154                                          u8 size, u16 data)
4155{
4156        union ich8_hws_flash_status hsfsts;
4157        union ich8_hws_flash_ctrl hsflctl;
4158        u32 flash_linear_addr;
4159        u32 flash_data = 0;
4160        s32 ret_val;
4161        u8 count = 0;
4162
4163        if (hw->mac.type >= e1000_pch_spt) {
4164                if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4165                        return -E1000_ERR_NVM;
4166        } else {
4167                if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4168                        return -E1000_ERR_NVM;
4169        }
4170
4171        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4172                             hw->nvm.flash_base_addr);
4173
4174        do {
4175                udelay(1);
4176                /* Steps */
4177                ret_val = e1000_flash_cycle_init_ich8lan(hw);
4178                if (ret_val)
4179                        break;
4180                /* In SPT, this register is in LAN memory space, not
4181                 * flash.  Therefore, only 32-bit access is supported
4182                 */
4183                if (hw->mac.type >= e1000_pch_spt)
4184                        hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
4185                else
4186                        hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4187
4188                /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4189                hsflctl.hsf_ctrl.fldbcount = size - 1;
4190                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4191                /* In SPT, this register is in LAN memory space,
4192                 * not flash.  Therefore, only 32-bit access is
4193                 * supported
4194                 */
4195                if (hw->mac.type >= e1000_pch_spt)
4196                        ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4197                else
4198                        ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4199
4200                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4201
4202                if (size == 1)
4203                        flash_data = (u32)data & 0x00FF;
4204                else
4205                        flash_data = (u32)data;
4206
4207                ew32flash(ICH_FLASH_FDATA0, flash_data);
4208
4209                /* Check if FCERR is set to 1.  If so, clear it and try
4210                 * the whole sequence a few more times; otherwise we are done.
4211                 */
4212                ret_val =
4213                    e1000_flash_cycle_ich8lan(hw,
4214                                              ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4215                if (!ret_val)
4216                        break;
4217
4218                /* If we're here, then things are most likely
4219                 * completely hosed, but if the error condition
4220                 * is detected, it won't hurt to give it another
4221                 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4222                 */
4223                hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4224                if (hsfsts.hsf_status.flcerr)
4225                        /* Repeat for some time before giving up. */
4226                        continue;
4227                if (!hsfsts.hsf_status.flcdone) {
4228                        e_dbg("Timeout error - flash cycle did not complete.\n");
4229                        break;
4230                }
4231        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4232
4233        return ret_val;
4234}
4235
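/* Illustrative note (sketch only): on SPT and newer parts the 16-bit
 * HSFCTL register is mapped into the upper half of the 32-bit HSFSTS
 * dword, which is why the function above reads and writes the control
 * bits as:
 *
 *        hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
 *        ...modify hsflctl.hsf_ctrl fields...
 *        ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
 *
 * rather than using the 16-bit er16flash()/ew16flash() accessors.
 */
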
4236/**
4237 *  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4238 *  @hw: pointer to the HW structure
4239 *  @offset: The offset (in bytes) of the dword to write.
4240 *  @data: The 4 bytes to write to the NVM.
4241 *
4242 *  Writes a dword (4 bytes) to the NVM using the flash access registers.
4243 **/
4244static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4245                                            u32 data)
4246{
4247        union ich8_hws_flash_status hsfsts;
4248        union ich8_hws_flash_ctrl hsflctl;
4249        u32 flash_linear_addr;
4250        s32 ret_val;
4251        u8 count = 0;
4252
4253        if (hw->mac.type >= e1000_pch_spt) {
4254                if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4255                        return -E1000_ERR_NVM;
4256        }
4257        flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4258                             hw->nvm.flash_base_addr);
4259        do {
4260                udelay(1);
4261                /* Steps */
4262                ret_val = e1000_flash_cycle_init_ich8lan(hw);
4263                if (ret_val)
4264                        break;
4265
4266                /* In SPT, this register is in LAN memory space, not
4267                 * flash.  Therefore, only 32-bit access is supported
4268                 */
4269                if (hw->mac.type >= e1000_pch_spt)
4270                        hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
4271                            >> 16;
4272                else
4273                        hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4274
4275                hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4276                hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4277
4278                /* In SPT, this register is in LAN memory space,
4279                 * not flash.  Therefore, only 32-bit access is
4280                 * supported
4281                 */
4282                if (hw->mac.type >= e1000_pch_spt)
4283                        ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4284                else
4285                        ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4286
4287                ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4288
4289                ew32flash(ICH_FLASH_FDATA0, data);
4290
4291                /* Check if FCERR is set to 1.  If so, clear it and try
4292                 * the whole sequence a few more times; otherwise we are done.
4293                 */
4294                ret_val =
4295                   e1000_flash_cycle_ich8lan(hw,
4296                                             ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4297
4298                if (!ret_val)
4299                        break;
4300
4301                /* If we're here, then things are most likely
4302                 * completely hosed, but if the error condition
4303                 * is detected, it won't hurt to give it another
4304                 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4305                 */
4306                hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4307
4308                if (hsfsts.hsf_status.flcerr)
4309                        /* Repeat for some time before giving up. */
4310                        continue;
4311                if (!hsfsts.hsf_status.flcdone) {
4312                        e_dbg("Timeout error - flash cycle did not complete.\n");
4313                        break;
4314                }
4315        } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4316
4317        return ret_val;
4318}
4319
4320/**
4321 *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4322 *  @hw: pointer to the HW structure
4323 *  @offset: The index of the byte to write.
4324 *  @data: The byte to write to the NVM.
4325 *
4326 *  Writes a single byte to the NVM using the flash access registers.
4327 **/
4328static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4329                                          u8 data)
4330{
4331        u16 word = (u16)data;
4332
4333        return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4334}
4335
4336/**
4337 *  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4338 *  @hw: pointer to the HW structure
4339 *  @offset: The word offset of the dword to write.
4340 *  @dword: The dword to write to the NVM.
4341 *
4342 *  Writes a single dword to the NVM using the flash access registers.
4343 *  Goes through a retry algorithm before giving up.
4344 **/
4345static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4346                                                 u32 offset, u32 dword)
4347{
4348        s32 ret_val;
4349        u16 program_retries;
4350
4351        /* Must convert word offset into bytes. */
4352        offset <<= 1;
4353        ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4354
4355        if (!ret_val)
4356                return ret_val;
4357        for (program_retries = 0; program_retries < 100; program_retries++) {
4358                e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset);
4359                usleep_range(100, 200);
4360                ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4361                if (!ret_val)
4362                        break;
4363        }
4364        if (program_retries == 100)
4365                return -E1000_ERR_NVM;
4366
4367        return 0;
4368}
4369
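/* Usage sketch (illustrative only; 'shadow' and 'i' are hypothetical
 * caller-side names): a caller updating the flash from a shadow-RAM image
 * would typically combine two adjacent 16-bit words into one dword and
 * pass the word offset, which is converted to a byte offset internally:
 *
 *        u32 dword = (u32)shadow[i] | ((u32)shadow[i + 1] << 16);
 *
 *        ret_val = e1000_retry_write_flash_dword_ich8lan(hw, i, dword);
 *        if (ret_val)
 *                return ret_val;
 */
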
4370/**
4371 *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4372 *  @hw: pointer to the HW structure
4373 *  @offset: The offset of the byte to write.
4374 *  @byte: The byte to write to the NVM.
4375 *
4376 *  Writes a single byte to the NVM using the flash access registers.
4377 *  Goes through a retry algorithm before giving up.
4378 **/
4379static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4380                                                u32 offset, u8 byte)
4381{
4382        s32 ret_val;
4383        u16 program_retries;
4384
4385        ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4386        if (!ret_val)
4387                return ret_val;
4388
4389        for (program_retries = 0; program_retries < 100; program_retries++) {
4390                e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
4391                usleep_range(100, 200);
4392                ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4393                if (!ret_val)
4394                        break;
4395        }
4396        if (program_retries == 100)
4397                return -E1000_ERR_NVM;
4398
4399        return 0;
4400}
4401
4402/**
4403 *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4404 *  @hw: pointer to the HW structure
4405 *  @bank: 0 for first bank, 1 for second bank, etc.
4406 *
4407 *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4408 *  bank N is 4096 * N + flash_reg_addr.
4409 **/
4410static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4411{
4412        struct e1000_nvm_info *nvm = &hw->nvm;
4413        union ich8_hws_flash_status hsfsts;
4414        union ich8_hws_flash_ctrl hsflctl;
4415        u32 flash_linear_addr;
4416        /* bank size is in 16bit words - adjust to bytes */
4417        u32 flash_bank_size = nvm->flash_bank_size * 2;
4418        s32 ret_val;
4419        s32 count = 0;
4420        s32 j, iteration, sector_size;
4421
4422        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4423
4424        /* Determine HW Sector size: Read BERASE bits of hw flash status
4425         * register
4426         * 00: The Hw sector is 256 bytes, hence we need to erase 16
4427         *     consecutive sectors.  The start index for the nth Hw sector
4428         *     can be calculated as = bank * 4096 + n * 256
4429         * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4430         *     The start index for the nth Hw sector can be calculated
4431         *     as = bank * 4096
4432         * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4433         *     (ich9 only, otherwise error condition)
4434         * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4435         */
4436        switch (hsfsts.hsf_status.berasesz) {
4437        case 0:
4438                /* Hw sector size 256 */
4439                sector_size = ICH_FLASH_SEG_SIZE_256;
4440                iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4441                break;
4442        case 1:
4443                sector_size = ICH_FLASH_SEG_SIZE_4K;
4444                iteration = 1;
4445                break;
4446        case 2:
4447                sector_size = ICH_FLASH_SEG_SIZE_8K;
4448                iteration = 1;
4449                break;
4450        case 3:
4451                sector_size = ICH_FLASH_SEG_SIZE_64K;
4452                iteration = 1;
4453                break;
4454        default:
4455                return -E1000_ERR_NVM;
4456        }
4457
4458        /* Start with the base address, then add the sector offset. */
4459        flash_linear_addr = hw->nvm.flash_base_addr;
4460        flash_linear_addr += (bank) ? flash_bank_size : 0;
4461
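        /* Worked example (illustrative): with 4 KB hardware sectors
         * (berasesz == 1), erasing bank 1 issues a single erase cycle at
         * flash_base_addr + flash_bank_size; with 256-byte sectors the same
         * bank instead takes flash_bank_size / 256 consecutive erase cycles.
         */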
4462        for (j = 0; j < iteration; j++) {
4463                do {
4464                        u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4465
4466                        /* Steps */
4467                        ret_val = e1000_flash_cycle_init_ich8lan(hw);
4468                        if (ret_val)
4469                                return ret_val;
4470
4471                        /* Write a value 11 (block Erase) in Flash
4472                         * Cycle field in hw flash control
4473                         */
4474                        if (hw->mac.type >= e1000_pch_spt)
4475                                hsflctl.regval =
4476                                    er32flash(ICH_FLASH_HSFSTS) >> 16;
4477                        else
4478                                hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4479
4480                        hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4481                        if (hw->mac.type >= e1000_pch_spt)
4482                                ew32flash(ICH_FLASH_HSFSTS,
4483                                          hsflctl.regval << 16);
4484                        else
4485                                ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4486
4487                        /* Write the last 24 bits of an index within the
4488                         * block into Flash Linear address field in Flash
4489                         * Address.
4490                         */
4491                        ew32flash(ICH_FLASH_FADDR,
4492                                  flash_linear_addr + (j * sector_size));
4493
4494                        ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4495                        if (!ret_val)
4496                                break;
4497
4498                        /* Check if FCERR is set to 1.  If 1,
4499                         * clear it and try the whole sequence
4500                         * a few more times else Done
4501                         */
4502                        hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4503                        if (hsfsts.hsf_status.flcerr)
4504                                /* repeat for some time before giving up */
4505                                continue;
4506                        else if (!hsfsts.hsf_status.flcdone)
4507                                return ret_val;
4508                } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4509        }
4510
4511        return 0;
4512}
4513
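/* Usage sketch (illustrative only; 'bank_to_erase', 'act_offset' and 'byte'
 * are hypothetical caller-side names): a typical flash update combines this
 * erase with the retry write helpers above, along the lines of
 *
 *        ret_val = e1000_erase_flash_bank_ich8lan(hw, bank_to_erase);
 *        ...
 *        ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, byte);
 */
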
4514/**
4515 *  e1000_valid_led_default_ich8lan - Set the default LED settings
4516 *  @hw: pointer to the HW structure
4517 *  @data: Pointer to the LED settings
4518 *
4519 *  Reads the LED default settings from the NVM to data.  If the NVM LED
4520 *  settings is all 0's or F's, set the LED default to a valid LED default
4521 *  setting.
4522 **/
4523static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4524{
4525        s32 ret_val;
4526
4527        ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
4528        if (ret_val) {
4529                e_dbg("NVM Read Error\n");
4530                return ret_val;
4531        }
4532
4533        if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4534                *data = ID_LED_DEFAULT_ICH8LAN;
4535
4536        return 0;
4537}
4538
4539/**
4540 *  e1000_id_led_init_pchlan - store LED configurations
4541 *  @hw: pointer to the HW structure
4542 *
4543 *  PCH does not control LEDs via the LEDCTL register, rather it uses
4544 *  the PHY LED configuration register.
4545 *
4546 *  PCH also does not have an "always on" or "always off" mode which
4547 *  complicates the ID feature.  Instead of using the "on" mode to indicate
4548 *  in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
4549 *  use "link_up" mode.  The LEDs will still ID on request if there is no
4550 *  link based on logic in e1000_led_[on|off]_pchlan().
4551 **/
4552static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4553{
4554        struct e1000_mac_info *mac = &hw->mac;
4555        s32 ret_val;
4556        const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4557        const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4558        u16 data, i, temp, shift;
4559
4560        /* Get default ID LED modes */
4561        ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4562        if (ret_val)
4563                return ret_val;
4564
4565        mac->ledctl_default = er32(LEDCTL);
4566        mac->ledctl_mode1 = mac->ledctl_default;
4567        mac->ledctl_mode2 = mac->ledctl_default;
4568
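        /* Each NVM ID LED nibble (4 bits per LED, hence the 'i << 2' shift)
         * selects a behavior, while the PHY LED configuration packs one
         * 5-bit field per LED, hence 'shift = i * 5' below.
         */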
4569        for (i = 0; i < 4; i++) {
4570                temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4571                shift = (i * 5);
4572                switch (temp) {
4573                case ID_LED_ON1_DEF2:
4574                case ID_LED_ON1_ON2:
4575                case ID_LED_ON1_OFF2:
4576                        mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4577                        mac->ledctl_mode1 |= (ledctl_on << shift);
4578                        break;
4579                case ID_LED_OFF1_DEF2:
4580                case ID_LED_OFF1_ON2:
4581                case ID_LED_OFF1_OFF2:
4582                        mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4583                        mac->ledctl_mode1 |= (ledctl_off << shift);
4584                        break;
4585                default:
4586                        /* Do nothing */
4587                        break;
4588                }
4589                switch (temp) {
4590                case ID_LED_DEF1_ON2:
4591                case ID_LED_ON1_ON2:
4592                case ID_LED_OFF1_ON2:
4593                        mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4594                        mac->ledctl_mode2 |= (ledctl_on << shift);
4595                        break;
4596                case ID_LED_DEF1_OFF2:
4597                case ID_LED_ON1_OFF2:
4598                case ID_LED_OFF1_OFF2:
4599                        mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4600                        mac->ledctl_mode2 |= (ledctl_off << shift);
4601                        break;
4602                default:
4603                        /* Do nothing */
4604                        break;
4605                }
4606        }
4607
4608        return 0;
4609}
4610
4611/**
4612 *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4613 *  @hw: pointer to the HW structure
4614 *
4615 *  ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4616 *  register, so the bus width is hard coded.
4617 **/
4618static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4619{
4620        struct e1000_bus_info *bus = &hw->bus;
4621        s32 ret_val;
4622
4623        ret_val = e1000e_get_bus_info_pcie(hw);
4624
4625        /* ICH devices are "PCI Express"-ish.  They have
4626         * a configuration space, but do not contain
4627         * PCI Express Capability registers, so bus width
4628         * must be hardcoded.
4629         */
4630        if (bus->width == e1000_bus_width_unknown)
4631                bus->width = e1000_bus_width_pcie_x1;
4632
4633        return ret_val;
4634}
4635
4636/**
4637 *  e1000_reset_hw_ich8lan - Reset the hardware
4638 *  @hw: pointer to the HW structure
4639 *
4640 *  Does a full reset of the hardware which includes a reset of the PHY and
4641 *  MAC.
4642 **/
4643static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4644{
4645        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4646        u16 kum_cfg;
4647        u32 ctrl, reg;
4648        s32 ret_val;
4649
4650        /* Prevent the PCI-E bus from sticking if there is no TLP connection
4651         * on the last TLP read/write transaction when MAC is reset.
4652         */
4653        ret_val = e1000e_disable_pcie_master(hw);
4654        if (ret_val)
4655                e_dbg("PCI-E Master disable polling has failed.\n");
4656
4657        e_dbg("Masking off all interrupts\n");
4658        ew32(IMC, 0xffffffff);
4659
4660        /* Disable the Transmit and Receive units.  Then delay to allow
4661         * any pending transactions to complete before we hit the MAC
4662         * with the global reset.
4663         */
4664        ew32(RCTL, 0);
4665        ew32(TCTL, E1000_TCTL_PSP);
4666        e1e_flush();
4667
4668        usleep_range(10000, 20000);
4669
4670        /* Workaround for ICH8 bit corruption issue in FIFO memory */
4671        if (hw->mac.type == e1000_ich8lan) {
4672                /* Set Tx and Rx buffer allocation to 8k apiece. */
4673                ew32(PBA, E1000_PBA_8K);
4674                /* Set Packet Buffer Size to 16k. */
4675                ew32(PBS, E1000_PBS_16K);
4676        }
4677
4678        if (hw->mac.type == e1000_pchlan) {
4679                /* Save the NVM K1 bit setting */
4680                ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4681                if (ret_val)
4682                        return ret_val;
4683
4684                if (kum_cfg & E1000_NVM_K1_ENABLE)
4685                        dev_spec->nvm_k1_enabled = true;
4686                else
4687                        dev_spec->nvm_k1_enabled = false;
4688        }
4689
4690        ctrl = er32(CTRL);
4691
4692        if (!hw->phy.ops.check_reset_block(hw)) {
4693                /* Full-chip reset requires MAC and PHY reset at the same
4694                 * time to make sure the interface between MAC and the
4695                 * external PHY is reset.
4696                 */
4697                ctrl |= E1000_CTRL_PHY_RST;
4698
4699                /* Gate automatic PHY configuration by hardware on
4700                 * non-managed 82579
4701                 */
4702                if ((hw->mac.type == e1000_pch2lan) &&
4703                    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
4704                        e1000_gate_hw_phy_config_ich8lan(hw, true);
4705        }
4706        ret_val = e1000_acquire_swflag_ich8lan(hw);
4707        e_dbg("Issuing a global reset to ich8lan\n");
4708        ew32(CTRL, (ctrl | E1000_CTRL_RST));
4709        /* cannot issue a flush here because it hangs the hardware */
4710        msleep(20);
4711
4712        /* Set Phy Config Counter to 50msec */
4713        if (hw->mac.type == e1000_pch2lan) {
4714                reg = er32(FEXTNVM3);
4715                reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4716                reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4717                ew32(FEXTNVM3, reg);
4718        }
4719
4720        if (!ret_val)
4721                clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
4722
4723        if (ctrl & E1000_CTRL_PHY_RST) {
4724                ret_val = hw->phy.ops.get_cfg_done(hw);
4725                if (ret_val)
4726                        return ret_val;
4727
4728                ret_val = e1000_post_phy_reset_ich8lan(hw);
4729                if (ret_val)
4730                        return ret_val;
4731        }
4732
4733        /* For PCH, this write will make sure that any noise
4734         * will be detected as a CRC error and be dropped rather than show up
4735         * as a bad packet to the DMA engine.
4736         */
4737        if (hw->mac.type == e1000_pchlan)
4738                ew32(CRC_OFFSET, 0x65656565);
4739
4740        ew32(IMC, 0xffffffff);
4741        er32(ICR);
4742
4743        reg = er32(KABGTXD);
4744        reg |= E1000_KABGTXD_BGSQLBIAS;
4745        ew32(KABGTXD, reg);
4746
4747        return 0;
4748}
4749
4750/**
4751 *  e1000_init_hw_ich8lan - Initialize the hardware
4752 *  @hw: pointer to the HW structure
4753 *
4754 *  Prepares the hardware for transmit and receive by doing the following:
4755 *   - initialize hardware bits
4756 *   - initialize LED identification
4757 *   - setup receive address registers
4758 *   - setup flow control
4759 *   - setup transmit descriptors
4760 *   - clear statistics
4761 **/
4762static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4763{
4764        struct e1000_mac_info *mac = &hw->mac;
4765        u32 ctrl_ext, txdctl, snoop;
4766        s32 ret_val;
4767        u16 i;
4768
4769        e1000_initialize_hw_bits_ich8lan(hw);
4770
4771        /* Initialize identification LED */
4772        ret_val = mac->ops.id_led_init(hw);
4773        /* An error is not fatal and we should not stop init due to this */
4774        if (ret_val)
4775                e_dbg("Error initializing identification LED\n");
4776
4777        /* Setup the receive address. */
4778        e1000e_init_rx_addrs(hw, mac->rar_entry_count);
4779
4780        /* Zero out the Multicast HASH table */
4781        e_dbg("Zeroing the MTA\n");
4782        for (i = 0; i < mac->mta_reg_count; i++)
4783                E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4784
4785        /* The 82578 Rx buffer will stall if wakeup is enabled in host and
4786         * the ME.  Disable wakeup by clearing the host wakeup bit.
4787         * Reset the phy after disabling host wakeup to reset the Rx buffer.
4788         */
4789        if (hw->phy.type == e1000_phy_82578) {
4790                e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
4791                i &= ~BM_WUC_HOST_WU_BIT;
4792                e1e_wphy(hw, BM_PORT_GEN_CFG, i);
4793                ret_val = e1000_phy_hw_reset_ich8lan(hw);
4794                if (ret_val)
4795                        return ret_val;
4796        }
4797
4798        /* Setup link and flow control */
4799        ret_val = mac->ops.setup_link(hw);
4800
4801        /* Set the transmit descriptor write-back policy for both queues */
4802        txdctl = er32(TXDCTL(0));
4803        txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4804                  E1000_TXDCTL_FULL_TX_DESC_WB);
4805        txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4806                  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4807        ew32(TXDCTL(0), txdctl);
4808        txdctl = er32(TXDCTL(1));
4809        txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4810                  E1000_TXDCTL_FULL_TX_DESC_WB);
4811        txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4812                  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4813        ew32(TXDCTL(1), txdctl);
4814
4815        /* ICH8 has opposite polarity of no_snoop bits.
4816         * By default, we should use snoop behavior.
4817         */
4818        if (mac->type == e1000_ich8lan)
4819                snoop = PCIE_ICH8_SNOOP_ALL;
4820        else
4821                snoop = (u32)~(PCIE_NO_SNOOP_ALL);
4822        e1000e_set_pcie_no_snoop(hw, snoop);
4823
4824        ctrl_ext = er32(CTRL_EXT);
4825        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4826        ew32(CTRL_EXT, ctrl_ext);
4827
4828        /* Clear all of the statistics registers (clear on read).  It is
4829         * important that we do this after we have tried to establish link
4830         * because the symbol error count will increment wildly if there
4831         * is no link.
4832         */
4833        e1000_clear_hw_cntrs_ich8lan(hw);
4834
4835        return ret_val;
4836}
4837
4838/**
4839 *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4840 *  @hw: pointer to the HW structure
4841 *
4842 *  Sets/Clears the hardware bits necessary for correctly setting up the
4843 *  hardware for transmit and receive.
4844 **/
4845static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4846{
4847        u32 reg;
4848
4849        /* Extended Device Control */
4850        reg = er32(CTRL_EXT);
4851        reg |= BIT(22);
4852        /* Enable PHY low-power state when MAC is at D3 w/o WoL */
4853        if (hw->mac.type >= e1000_pchlan)
4854                reg |= E1000_CTRL_EXT_PHYPDEN;
4855        ew32(CTRL_EXT, reg);
4856
4857        /* Transmit Descriptor Control 0 */
4858        reg = er32(TXDCTL(0));
4859        reg |= BIT(22);
4860        ew32(TXDCTL(0), reg);
4861
4862        /* Transmit Descriptor Control 1 */
4863        reg = er32(TXDCTL(1));
4864        reg |= BIT(22);
4865        ew32(TXDCTL(1), reg);
4866
4867        /* Transmit Arbitration Control 0 */
4868        reg = er32(TARC(0));
4869        if (hw->mac.type == e1000_ich8lan)
4870                reg |= BIT(28) | BIT(29);
4871        reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27);
4872        ew32(TARC(0), reg);
4873
4874        /* Transmit Arbitration Control 1 */
4875        reg = er32(TARC(1));
4876        if (er32(TCTL) & E1000_TCTL_MULR)
4877                reg &= ~BIT(28);
4878        else
4879                reg |= BIT(28);
4880        reg |= BIT(24) | BIT(26) | BIT(30);
4881        ew32(TARC(1), reg);
4882
4883        /* Device Status */
4884        if (hw->mac.type == e1000_ich8lan) {
4885                reg = er32(STATUS);
4886                reg &= ~BIT(31);
4887                ew32(STATUS, reg);
4888        }
4889
4890        /* Work around a descriptor data corruption issue during NFSv2 UDP
4891         * traffic by simply disabling the NFS filtering capability.
4892         */
4893        reg = er32(RFCTL);
4894        reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4895
4896        /* Disable IPv6 extension header parsing because some malformed
4897         * IPv6 headers can hang the Rx.
4898         */
4899        if (hw->mac.type == e1000_ich8lan)
4900                reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4901        ew32(RFCTL, reg);
4902
4903        /* Enable ECC on Lynxpoint */
4904        if (hw->mac.type >= e1000_pch_lpt) {
4905                reg = er32(PBECCSTS);
4906                reg |= E1000_PBECCSTS_ECC_ENABLE;
4907                ew32(PBECCSTS, reg);
4908
4909                reg = er32(CTRL);
4910                reg |= E1000_CTRL_MEHE;
4911                ew32(CTRL, reg);
4912        }
4913}
4914
4915/**
4916 *  e1000_setup_link_ich8lan - Setup flow control and link settings
4917 *  @hw: pointer to the HW structure
4918 *
4919 *  Determines which flow control settings to use, then configures flow
4920 *  control.  Calls the appropriate media-specific link configuration
4921 *  function.  Assuming the adapter has a valid link partner, a valid link
4922 *  should be established.  Assumes the hardware has previously been reset
4923 *  and the transmitter and receiver are not enabled.
4924 **/
4925static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4926{
4927        s32 ret_val;
4928
4929        if (hw->phy.ops.check_reset_block(hw))
4930                return 0;
4931
4932        /* ICH parts do not have a word in the NVM to determine
4933         * the default flow control setting, so we explicitly
4934         * set it to full.
4935         */
4936        if (hw->fc.requested_mode == e1000_fc_default) {
4937                /* Workaround h/w hang when Tx flow control enabled */
4938                if (hw->mac.type == e1000_pchlan)
4939                        hw->fc.requested_mode = e1000_fc_rx_pause;
4940                else
4941                        hw->fc.requested_mode = e1000_fc_full;
4942        }
4943
4944        /* Save off the requested flow control mode for use later.  Depending
4945         * on the link partner's capabilities, we may or may not use this mode.
4946         */
4947        hw->fc.current_mode = hw->fc.requested_mode;
4948
4949        e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
4950
4951        /* Continue to configure the copper link. */
4952        ret_val = hw->mac.ops.setup_physical_interface(hw);
4953        if (ret_val)
4954                return ret_val;
4955
4956        ew32(FCTTV, hw->fc.pause_time);
4957        if ((hw->phy.type == e1000_phy_82578) ||
4958            (hw->phy.type == e1000_phy_82579) ||
4959            (hw->phy.type == e1000_phy_i217) ||
4960            (hw->phy.type == e1000_phy_82577)) {
4961                ew32(FCRTV_PCH, hw->fc.refresh_time);
4962
4963                ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
4964                                   hw->fc.pause_time);
4965                if (ret_val)
4966                        return ret_val;
4967        }
4968
4969        return e1000e_set_fc_watermarks(hw);
4970}
4971
4972/**
4973 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4974 *  @hw: pointer to the HW structure
4975 *
4976 *  Configures the Kumeran interface to the PHY to wait the appropriate time
4977 *  when polling the PHY, then calls the generic setup_copper_link to finish
4978 *  configuring the copper link.
4979 **/
4980static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4981{
4982        u32 ctrl;
4983        s32 ret_val;
4984        u16 reg_data;
4985
4986        ctrl = er32(CTRL);
4987        ctrl |= E1000_CTRL_SLU;
4988        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4989        ew32(CTRL, ctrl);
4990
4991        /* Set the mac to wait the maximum time between each iteration
4992         * and increase the max iterations when polling the phy;
4993         * this fixes erroneous timeouts at 10Mbps.
4994         */
4995        ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
4996        if (ret_val)
4997                return ret_val;
4998        ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
4999                                       &reg_data);
5000        if (ret_val)
5001                return ret_val;
5002        reg_data |= 0x3F;
5003        ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
5004                                        reg_data);
5005        if (ret_val)
5006                return ret_val;
5007
5008        switch (hw->phy.type) {
5009        case e1000_phy_igp_3:
5010                ret_val = e1000e_copper_link_setup_igp(hw);
5011                if (ret_val)
5012                        return ret_val;
5013                break;
5014        case e1000_phy_bm:
5015        case e1000_phy_82578:
5016                ret_val = e1000e_copper_link_setup_m88(hw);
5017                if (ret_val)
5018                        return ret_val;
5019                break;
5020        case e1000_phy_82577:
5021        case e1000_phy_82579:
5022                ret_val = e1000_copper_link_setup_82577(hw);
5023                if (ret_val)
5024                        return ret_val;
5025                break;
5026        case e1000_phy_ife:
5027                ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
5028                if (ret_val)
5029                        return ret_val;
5030
5031                reg_data &= ~IFE_PMC_AUTO_MDIX;
5032
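                /* hw->phy.mdix: 1 forces MDI, 2 forces MDI-X, anything else
                 * (including 0, the default) selects auto crossover.
                 */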
5033                switch (hw->phy.mdix) {
5034                case 1:
5035                        reg_data &= ~IFE_PMC_FORCE_MDIX;
5036                        break;
5037                case 2:
5038                        reg_data |= IFE_PMC_FORCE_MDIX;
5039                        break;
5040                case 0:
5041                default:
5042                        reg_data |= IFE_PMC_AUTO_MDIX;
5043                        break;
5044                }
5045                ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
5046                if (ret_val)
5047                        return ret_val;
5048                break;
5049        default:
5050                break;
5051        }
5052
5053        return e1000e_setup_copper_link(hw);
5054}
5055
5056/**
5057 *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5058 *  @hw: pointer to the HW structure
5059 *
5060 *  Calls the PHY specific link setup function and then calls the
5061 *  generic setup_copper_link to finish configuring the link for
5062 *  Lynxpoint PCH devices
5063 **/
5064static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5065{
5066        u32 ctrl;
5067        s32 ret_val;
5068
5069        ctrl = er32(CTRL);
5070        ctrl |= E1000_CTRL_SLU;
5071        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5072        ew32(CTRL, ctrl);
5073
5074        ret_val = e1000_copper_link_setup_82577(hw);
5075        if (ret_val)
5076                return ret_val;
5077
5078        return e1000e_setup_copper_link(hw);
5079}
5080
5081/**
5082 *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5083 *  @hw: pointer to the HW structure
5084 *  @speed: pointer to store current link speed
5085 *  @duplex: pointer to store the current link duplex
5086 *
5087 *  Calls the generic get_speed_and_duplex to retrieve the current link
5088 *  information and then calls the Kumeran lock loss workaround for links at
5089 *  gigabit speeds.
5090 **/
5091static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5092                                          u16 *duplex)
5093{
5094        s32 ret_val;
5095
5096        ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
5097        if (ret_val)
5098                return ret_val;
5099
5100        if ((hw->mac.type == e1000_ich8lan) &&
5101            (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
5102                ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5103        }
5104
5105        return ret_val;
5106}
5107
5108/**
5109 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5110 *  @hw: pointer to the HW structure
5111 *
5112 *  Work-around for 82566 Kumeran PCS lock loss:
5113 *  On link status change (i.e. PCI reset, speed change) and link is up and
5114 *  speed is gigabit-
5115 *    0) if workaround is optionally disabled do nothing
5116 *    1) wait 1ms for Kumeran link to come up
5117 *    2) check Kumeran Diagnostic register PCS lock loss bit
5118 *    3) if not set the link is locked (all is good), otherwise...
5119 *    4) reset the PHY
5120 *    5) repeat up to 10 times
5121 *  Note: this is only called for IGP3 copper when speed is 1gb.
5122 **/
5123static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5124{
5125        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5126        u32 phy_ctrl;
5127        s32 ret_val;
5128        u16 i, data;
5129        bool link;
5130
5131        if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5132                return 0;
5133
5134        /* Make sure link is up before proceeding.  If not, just return;
5135         * attempting this while the link is negotiating has been seen to
5136         * foul up link stability.
5137         */
5138        ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
5139        if (!link)
5140                return 0;
5141
5142        for (i = 0; i < 10; i++) {
5143                /* read once to clear */
5144                ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5145                if (ret_val)
5146                        return ret_val;
5147                /* and again to get new status */
5148                ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5149                if (ret_val)
5150                        return ret_val;
5151
5152                /* check for PCS lock */
5153                if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5154                        return 0;
5155
5156                /* Issue PHY reset */
5157                e1000_phy_hw_reset(hw);
5158                mdelay(5);
5159        }
5160        /* Disable GigE link negotiation */
5161        phy_ctrl = er32(PHY_CTRL);
5162        phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5163                     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5164        ew32(PHY_CTRL, phy_ctrl);
5165
5166        /* Call gig speed drop workaround on Gig disable before accessing
5167         * any PHY registers
5168         */
5169        e1000e_gig_downshift_workaround_ich8lan(hw);
5170
5171        /* unable to acquire PCS lock */
5172        return -E1000_ERR_PHY;
5173}
5174
5175/**
5176 *  e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5177 *  @hw: pointer to the HW structure
5178 *  @state: boolean value used to set the current Kumeran workaround state
5179 *
5180 *  If ICH8, set the current Kumeran workaround state (enabled = true,
5181 *  disabled = false).
5182 **/
5183void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5184                                                  bool state)
5185{
5186        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5187
5188        if (hw->mac.type != e1000_ich8lan) {
5189                e_dbg("Workaround applies to ICH8 only.\n");
5190                return;
5191        }
5192
5193        dev_spec->kmrn_lock_loss_workaround_enabled = state;
5194}
5195
5196/**
5197 *  e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5198 *  @hw: pointer to the HW structure
5199 *
5200 *  Workaround for 82566 power-down on D3 entry:
5201 *    1) disable gigabit link
5202 *    2) write VR power-down enable
5203 *    3) read it back
5204 *  Continue if successful, else issue LCD reset and repeat
5205 **/
5206void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5207{
5208        u32 reg;
5209        u16 data;
5210        u8 retry = 0;
5211
5212        if (hw->phy.type != e1000_phy_igp_3)
5213                return;
5214
5215        /* Try the workaround twice (if needed) */
5216        do {
5217                /* Disable link */
5218                reg = er32(PHY_CTRL);
5219                reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5220                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5221                ew32(PHY_CTRL, reg);
5222
5223                /* Call gig speed drop workaround on Gig disable before
5224                 * accessing any PHY registers
5225                 */
5226                if (hw->mac.type == e1000_ich8lan)
5227                        e1000e_gig_downshift_workaround_ich8lan(hw);
5228
5229                /* Write VR power-down enable */
5230                e1e_rphy(hw, IGP3_VR_CTRL, &data);
5231                data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5232                e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5233
5234                /* Read it back and test */
5235                e1e_rphy(hw, IGP3_VR_CTRL, &data);
5236                data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5237                if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5238                        break;
5239
5240                /* Issue PHY reset and repeat at most one more time */
5241                reg = er32(CTRL);
5242                ew32(CTRL, reg | E1000_CTRL_PHY_RST);
5243                retry++;
5244        } while (retry);
5245}
5246
5247/**
5248 *  e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5249 *  @hw: pointer to the HW structure
5250 *
5251 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
5252 *  LPLU, Gig disable, MDIC PHY reset):
5253 *    1) Set Kumeran Near-end loopback
5254 *    2) Clear Kumeran Near-end loopback
5255 *  Should only be called for ICH8[m] devices with any 1G Phy.
5256 **/
5257void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5258{
5259        s32 ret_val;
5260        u16 reg_data;
5261
5262        if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
5263                return;
5264
5265        ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5266                                       &reg_data);
5267        if (ret_val)
5268                return;
5269        reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5270        ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5271                                        reg_data);
5272        if (ret_val)
5273                return;
5274        reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5275        e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
5276}
5277
5278/**
5279 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5280 *  @hw: pointer to the HW structure
5281 *
5282 *  During S0 to Sx transition, it is possible the link remains at gig
5283 *  instead of negotiating to a lower speed.  Before going to Sx, set
5284 *  'Gig Disable' to force link speed negotiation to a lower speed based on
5285 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5286 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5287 *  needs to be written.
5288 *  Parts that support (and are linked to a partner which supports) EEE in
5289 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5290 *  than 10Mbps w/o EEE.
5291 **/
5292void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5293{
5294        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5295        u32 phy_ctrl;
5296        s32 ret_val;
5297
5298        phy_ctrl = er32(PHY_CTRL);
5299        phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5300
5301        if (hw->phy.type == e1000_phy_i217) {
5302                u16 phy_reg, device_id = hw->adapter->pdev->device;
5303
5304                if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5305                    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5306                    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5307                    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5308                    (hw->mac.type >= e1000_pch_spt)) {
5309                        u32 fextnvm6 = er32(FEXTNVM6);
5310
5311                        ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5312                }
5313
5314                ret_val = hw->phy.ops.acquire(hw);
5315                if (ret_val)
5316                        goto out;
5317
5318                if (!dev_spec->eee_disable) {
5319                        u16 eee_advert;
5320
5321                        ret_val =
5322                            e1000_read_emi_reg_locked(hw,
5323                                                      I217_EEE_ADVERTISEMENT,
5324                                                      &eee_advert);
5325                        if (ret_val)
5326                                goto release;
5327
5328                        /* Disable LPLU if both link partners support 100BaseT
5329                         * EEE and 100Full is advertised on both ends of the
5330                         * link, and enable Auto Enable LPI since there will
5331                         * be no driver to enable LPI while in Sx.
5332                         */
5333                        if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5334                            (dev_spec->eee_lp_ability &
5335                             I82579_EEE_100_SUPPORTED) &&
5336                            (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5337                                phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5338                                              E1000_PHY_CTRL_NOND0A_LPLU);
5339
5340                                /* Set Auto Enable LPI after link up */
5341                                e1e_rphy_locked(hw,
5342                                                I217_LPI_GPIO_CTRL, &phy_reg);
5343                                phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5344                                e1e_wphy_locked(hw,
5345                                                I217_LPI_GPIO_CTRL, phy_reg);
5346                        }
5347                }
5348
5349                /* For i217 Intel Rapid Start Technology support,
5350                 * when the system is going into Sx and no manageability engine
5351                 * is present, the driver must configure proxy to reset only on
5352                 * power good.  LPI (Low Power Idle) state must also reset only
5353                 * on power good, as well as the MTA (Multicast table array).
5354                 * The SMBus release must also be disabled on LCD reset.
5355                 */
5356                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5357                        /* Enable proxy to reset only on power good. */
5358                        e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
5359                        phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5360                        e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
5361
5362                        /* Set bit enable LPI (EEE) to reset only on
5363                         * power good.
5364                         */
5365                        e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
5366                        phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5367                        e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
5368
5369                        /* Disable the SMB release on LCD reset. */
5370                        e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5371                        phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5372                        e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5373                }
5374
5375                /* Enable MTA to reset for Intel Rapid Start Technology
5376                 * Support
5377                 */
5378                e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5379                phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5380                e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5381
5382release:
5383                hw->phy.ops.release(hw);
5384        }
5385out:
5386        ew32(PHY_CTRL, phy_ctrl);
5387
5388        if (hw->mac.type == e1000_ich8lan)
5389                e1000e_gig_downshift_workaround_ich8lan(hw);
5390
5391        if (hw->mac.type >= e1000_pchlan) {
5392                e1000_oem_bits_config_ich8lan(hw, false);
5393
5394                /* Reset PHY to activate OEM bits on 82577/8 */
5395                if (hw->mac.type == e1000_pchlan)
5396                        e1000e_phy_hw_reset_generic(hw);
5397
5398                ret_val = hw->phy.ops.acquire(hw);
5399                if (ret_val)
5400                        return;
5401                e1000_write_smbus_addr(hw);
5402                hw->phy.ops.release(hw);
5403        }
5404}
5405
5406/**
5407 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5408 *  @hw: pointer to the HW structure
5409 *
5410 *  During Sx to S0 transitions on non-managed devices or managed devices
5411 *  on which PHY resets are not blocked, if the PHY registers cannot be
5412 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5413 *  the PHY.
5414 *  On i217, setup Intel Rapid Start Technology.
5415 **/
5416void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5417{
5418        s32 ret_val;
5419
5420        if (hw->mac.type < e1000_pch2lan)
5421                return;
5422
5423        ret_val = e1000_init_phy_workarounds_pchlan(hw);
5424        if (ret_val) {
5425                e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
5426                return;
5427        }
5428
5429        /* For i217 Intel Rapid Start Technology support, when the system
5430         * is transitioning from Sx and no manageability engine is present,
5431         * configure SMBus to restore on reset, disable proxy, and enable
5432         * the reset on MTA (Multicast table array).
5433         */
5434        if (hw->phy.type == e1000_phy_i217) {
5435                u16 phy_reg;
5436
5437                ret_val = hw->phy.ops.acquire(hw);
5438                if (ret_val) {
5439                        e_dbg("Failed to setup iRST\n");
5440                        return;
5441                }
5442
5443                /* Clear Auto Enable LPI after link up */
5444                e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5445                phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5446                e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5447
5448                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5449                        /* Restore clear on SMB if no manageability engine
5450                         * is present
5451                         */
5452                        ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5453                        if (ret_val)
5454                                goto release;
5455                        phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5456                        e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5457
5458                        /* Disable Proxy */
5459                        e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
5460                }
5461                /* Enable reset on MTA */
5462                ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5463                if (ret_val)
5464                        goto release;
5465                phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5466                e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5467release:
5468                if (ret_val)
5469                        e_dbg("Error %d in resume workarounds\n", ret_val);
5470                hw->phy.ops.release(hw);
5471        }
5472}
5473
5474/**
5475 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5476 *  @hw: pointer to the HW structure
5477 *
5478 *  Return the LED back to the default configuration.
5479 **/
5480static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5481{
5482        if (hw->phy.type == e1000_phy_ife)
5483                return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
5484
5485        ew32(LEDCTL, hw->mac.ledctl_default);
5486        return 0;
5487}
5488
5489/**
5490 *  e1000_led_on_ich8lan - Turn LEDs on
5491 *  @hw: pointer to the HW structure
5492 *
5493 *  Turn on the LEDs.
5494 **/
5495static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5496{
5497        if (hw->phy.type == e1000_phy_ife)
5498                return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5499                                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5500
5501        ew32(LEDCTL, hw->mac.ledctl_mode2);
5502        return 0;
5503}
5504
5505/**
5506 *  e1000_led_off_ich8lan - Turn LEDs off
5507 *  @hw: pointer to the HW structure
5508 *
5509 *  Turn off the LEDs.
5510 **/
5511static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5512{
5513        if (hw->phy.type == e1000_phy_ife)
5514                return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5515                                (IFE_PSCL_PROBE_MODE |
5516                                 IFE_PSCL_PROBE_LEDS_OFF));
5517
5518        ew32(LEDCTL, hw->mac.ledctl_mode1);
5519        return 0;
5520}
5521
5522/**
5523 *  e1000_setup_led_pchlan - Configures SW controllable LED
5524 *  @hw: pointer to the HW structure
5525 *
5526 *  This prepares the SW controllable LED for use.
5527 **/
5528static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5529{
5530        return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
5531}
5532
5533/**
5534 *  e1000_cleanup_led_pchlan - Restore the default LED operation
5535 *  @hw: pointer to the HW structure
5536 *
5537 *  Return the LED back to the default configuration.
5538 **/
5539static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5540{
5541        return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
5542}
5543
5544/**
5545 *  e1000_led_on_pchlan - Turn LEDs on
5546 *  @hw: pointer to the HW structure
5547 *
5548 *  Turn on the LEDs.
5549 **/
5550static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5551{
5552        u16 data = (u16)hw->mac.ledctl_mode2;
5553        u32 i, led;
5554
5555        /* If no link, then turn the LED on by setting the invert bit
5556         * for each LED whose mode is "link_up" in ledctl_mode2.
5557         */
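        /* Each LED occupies a 5-bit field in HV_LED_CONFIG (a mode plus the
         * IVRT invert bit), which is why the loop below steps by i * 5.
         */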
5558        if (!(er32(STATUS) & E1000_STATUS_LU)) {
5559                for (i = 0; i < 3; i++) {
5560                        led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5561                        if ((led & E1000_PHY_LED0_MODE_MASK) !=
5562                            E1000_LEDCTL_MODE_LINK_UP)
5563                                continue;
5564                        if (led & E1000_PHY_LED0_IVRT)
5565                                data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5566                        else
5567                                data |= (E1000_PHY_LED0_IVRT << (i * 5));
5568                }
5569        }
5570
5571        return e1e_wphy(hw, HV_LED_CONFIG, data);
5572}
5573
5574/**
5575 *  e1000_led_off_pchlan - Turn LEDs off
5576 *  @hw: pointer to the HW structure
5577 *
5578 *  Turn off the LEDs.
5579 **/
5580static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5581{
5582        u16 data = (u16)hw->mac.ledctl_mode1;
5583        u32 i, led;
5584
5585        /* If no link, then turn the LED off by clearing the invert bit
5586         * for each LED whose mode is "link_up" in ledctl_mode1.
5587         */
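        /* Same per-LED 5-bit field layout as in e1000_led_on_pchlan() above */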
5588        if (!(er32(STATUS) & E1000_STATUS_LU)) {
5589                for (i = 0; i < 3; i++) {
5590                        led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5591                        if ((led & E1000_PHY_LED0_MODE_MASK) !=
5592                            E1000_LEDCTL_MODE_LINK_UP)
5593                                continue;
5594                        if (led & E1000_PHY_LED0_IVRT)
5595                                data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5596                        else
5597                                data |= (E1000_PHY_LED0_IVRT << (i * 5));
5598                }
5599        }
5600
5601        return e1e_wphy(hw, HV_LED_CONFIG, data);
5602}
5603
5604/**
5605 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5606 *  @hw: pointer to the HW structure
5607 *
5608 *  Read the appropriate register for the config done bit to get completion
5609 *  status, and configure the PHY through s/w for EEPROM-less parts.
5610 *
5611 *  NOTE: some EEPROM-less silicon will fail when trying to read the
5612 *  config done bit, so only an error is logged and execution continues.
5613 *  If we were to return an error, EEPROM-less silicon would not be able
5614 *  to reset or change link.
5615 **/
5616static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5617{
5618        s32 ret_val = 0;
5619        u32 bank = 0;
5620        u32 status;
5621
5622        e1000e_get_cfg_done_generic(hw);
5623
5624        /* Wait for indication from h/w that it has completed basic config */
5625        if (hw->mac.type >= e1000_ich10lan) {
5626                e1000_lan_init_done_ich8lan(hw);
5627        } else {
5628                ret_val = e1000e_get_auto_rd_done(hw);
5629                if (ret_val) {
5630                        /* When auto config read does not complete, do not
5631                         * return with an error. This can happen in situations
5632                         * where there is no eeprom and prevents getting link.
5633                         */
5634                        e_dbg("Auto Read Done did not complete\n");
5635                        ret_val = 0;
5636                }
5637        }
5638
5639        /* Clear PHY Reset Asserted bit */
5640        status = er32(STATUS);
5641        if (status & E1000_STATUS_PHYRA)
5642                ew32(STATUS, status & ~E1000_STATUS_PHYRA);
5643        else
5644                e_dbg("PHY Reset Asserted not set - needs delay\n");
5645
5646        /* If EEPROM is not marked present, init the IGP 3 PHY manually */
5647        if (hw->mac.type <= e1000_ich9lan) {
5648                if (!(er32(EECD) & E1000_EECD_PRES) &&
5649                    (hw->phy.type == e1000_phy_igp_3)) {
5650                        e1000e_phy_init_script_igp3(hw);
5651                }
5652        } else {
5653                if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5654                        /* Maybe we should do a basic PHY config */
5655                        e_dbg("EEPROM not present\n");
5656                        ret_val = -E1000_ERR_CONFIG;
5657                }
5658        }
5659
5660        return ret_val;
5661}
5662
5663/**
5664 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5665 * @hw: pointer to the HW structure
5666 *
5667 * Remove the link when the PHY is powered down to save power, when link is
5668 * turned off during a driver unload, or when wake on LAN is not enabled.
5669 **/
5670static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5671{
5672        /* Power down only if neither manageability nor a reset block is active */
5673        if (!(hw->mac.ops.check_mng_mode(hw) ||
5674              hw->phy.ops.check_reset_block(hw)))
5675                e1000_power_down_phy_copper(hw);
5676}
5677
5678/**
5679 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5680 *  @hw: pointer to the HW structure
5681 *
5682 *  Clears hardware counters specific to the silicon family and calls
5683 *  clear_hw_cntrs_generic to clear all general purpose counters.
5684 **/
5685static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5686{
5687        u16 phy_data;
5688        s32 ret_val;
5689
5690        e1000e_clear_hw_cntrs_base(hw);
5691
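        /* These statistics registers are clear-on-read; reading them here
         * resets the family-specific counters without using the values.
         */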
5692        er32(ALGNERRC);
5693        er32(RXERRC);
5694        er32(TNCRS);
5695        er32(CEXTERR);
5696        er32(TSCTC);
5697        er32(TSCTFC);
5698
5699        er32(MGTPRC);
5700        er32(MGTPDC);
5701        er32(MGTPTC);
5702
5703        er32(IAC);
5704        er32(ICRXOC);
5705
5706        /* Clear PHY statistics registers */
5707        if ((hw->phy.type == e1000_phy_82578) ||
5708            (hw->phy.type == e1000_phy_82579) ||
5709            (hw->phy.type == e1000_phy_i217) ||
5710            (hw->phy.type == e1000_phy_82577)) {
5711                ret_val = hw->phy.ops.acquire(hw);
5712                if (ret_val)
5713                        return;
5714                ret_val = hw->phy.ops.set_page(hw,
5715                                               HV_STATS_PAGE << IGP_PAGE_SHIFT);
5716                if (ret_val)
5717                        goto release;
5718                hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5719                hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5720                hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5721                hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5722                hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5723                hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5724                hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5725                hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5726                hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5727                hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5728                hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5729                hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5730                hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5731                hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5732release:
5733                hw->phy.ops.release(hw);
5734        }
5735}
5736
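/* MAC operations shared by all ICH/PCH families.  The entries marked
 * "dependent on mac type" below are filled in later, when the MAC
 * parameters are initialized for the detected MAC type.
 */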
5737static const struct e1000_mac_operations ich8_mac_ops = {
5738        /* check_mng_mode dependent on mac type */
5739        .check_for_link         = e1000_check_for_copper_link_ich8lan,
5740        /* cleanup_led dependent on mac type */
5741        .clear_hw_cntrs         = e1000_clear_hw_cntrs_ich8lan,
5742        .get_bus_info           = e1000_get_bus_info_ich8lan,
5743        .set_lan_id             = e1000_set_lan_id_single_port,
5744        .get_link_up_info       = e1000_get_link_up_info_ich8lan,
5745        /* led_on dependent on mac type */
5746        /* led_off dependent on mac type */
5747        .update_mc_addr_list    = e1000e_update_mc_addr_list_generic,
5748        .reset_hw               = e1000_reset_hw_ich8lan,
5749        .init_hw                = e1000_init_hw_ich8lan,
5750        .setup_link             = e1000_setup_link_ich8lan,
5751        .setup_physical_interface = e1000_setup_copper_link_ich8lan,
5752        /* id_led_init dependent on mac type */
5753        .config_collision_dist  = e1000e_config_collision_dist_generic,
5754        .rar_set                = e1000e_rar_set_generic,
5755        .rar_get_count          = e1000e_rar_get_count_generic,
5756};
5757
5758static const struct e1000_phy_operations ich8_phy_ops = {
5759        .acquire                = e1000_acquire_swflag_ich8lan,
5760        .check_reset_block      = e1000_check_reset_block_ich8lan,
5761        .commit                 = NULL,
5762        .get_cfg_done           = e1000_get_cfg_done_ich8lan,
5763        .get_cable_length       = e1000e_get_cable_length_igp_2,
5764        .read_reg               = e1000e_read_phy_reg_igp,
5765        .release                = e1000_release_swflag_ich8lan,
5766        .reset                  = e1000_phy_hw_reset_ich8lan,
5767        .set_d0_lplu_state      = e1000_set_d0_lplu_state_ich8lan,
5768        .set_d3_lplu_state      = e1000_set_d3_lplu_state_ich8lan,
5769        .write_reg              = e1000e_write_phy_reg_igp,
5770};
5771
5772static const struct e1000_nvm_operations ich8_nvm_ops = {
5773        .acquire                = e1000_acquire_nvm_ich8lan,
5774        .read                   = e1000_read_nvm_ich8lan,
5775        .release                = e1000_release_nvm_ich8lan,
5776        .reload                 = e1000e_reload_nvm_generic,
5777        .update                 = e1000_update_nvm_checksum_ich8lan,
5778        .valid_led_default      = e1000_valid_led_default_ich8lan,
5779        .validate               = e1000_validate_nvm_checksum_ich8lan,
5780        .write                  = e1000_write_nvm_ich8lan,
5781};
5782
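/* The SPT (Sunrise Point) and newer PCH parts reuse the ICH8 flash accessors
 * except for the read and checksum-update routines, which have SPT-specific
 * implementations.
 */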
5783static const struct e1000_nvm_operations spt_nvm_ops = {
5784        .acquire                = e1000_acquire_nvm_ich8lan,
5785        .release                = e1000_release_nvm_ich8lan,
5786        .read                   = e1000_read_nvm_spt,
5787        .update                 = e1000_update_nvm_checksum_spt,
5788        .reload                 = e1000e_reload_nvm_generic,
5789        .valid_led_default      = e1000_valid_led_default_ich8lan,
5790        .validate               = e1000_validate_nvm_checksum_ich8lan,
5791        .write                  = e1000_write_nvm_ich8lan,
5792};
5793
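/* Per-family board configuration, selected at probe time based on the
 * PCI device ID.
 */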
5794const struct e1000_info e1000_ich8_info = {
5795        .mac                    = e1000_ich8lan,
5796        .flags                  = FLAG_HAS_WOL
5797                                  | FLAG_IS_ICH
5798                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5799                                  | FLAG_HAS_AMT
5800                                  | FLAG_HAS_FLASH
5801                                  | FLAG_APME_IN_WUC,
5802        .pba                    = 8,
5803        .max_hw_frame_size      = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
5804        .get_variants           = e1000_get_variants_ich8lan,
5805        .mac_ops                = &ich8_mac_ops,
5806        .phy_ops                = &ich8_phy_ops,
5807        .nvm_ops                = &ich8_nvm_ops,
5808};
5809
5810const struct e1000_info e1000_ich9_info = {
5811        .mac                    = e1000_ich9lan,
5812        .flags                  = FLAG_HAS_JUMBO_FRAMES
5813                                  | FLAG_IS_ICH
5814                                  | FLAG_HAS_WOL
5815                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5816                                  | FLAG_HAS_AMT
5817                                  | FLAG_HAS_FLASH
5818                                  | FLAG_APME_IN_WUC,
5819        .pba                    = 18,
5820        .max_hw_frame_size      = DEFAULT_JUMBO,
5821        .get_variants           = e1000_get_variants_ich8lan,
5822        .mac_ops                = &ich8_mac_ops,
5823        .phy_ops                = &ich8_phy_ops,
5824        .nvm_ops                = &ich8_nvm_ops,
5825};
5826
5827const struct e1000_info e1000_ich10_info = {
5828        .mac                    = e1000_ich10lan,
5829        .flags                  = FLAG_HAS_JUMBO_FRAMES
5830                                  | FLAG_IS_ICH
5831                                  | FLAG_HAS_WOL
5832                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5833                                  | FLAG_HAS_AMT
5834                                  | FLAG_HAS_FLASH
5835                                  | FLAG_APME_IN_WUC,
5836        .pba                    = 18,
5837        .max_hw_frame_size      = DEFAULT_JUMBO,
5838        .get_variants           = e1000_get_variants_ich8lan,
5839        .mac_ops                = &ich8_mac_ops,
5840        .phy_ops                = &ich8_phy_ops,
5841        .nvm_ops                = &ich8_nvm_ops,
5842};
5843
5844const struct e1000_info e1000_pch_info = {
5845        .mac                    = e1000_pchlan,
5846        .flags                  = FLAG_IS_ICH
5847                                  | FLAG_HAS_WOL
5848                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5849                                  | FLAG_HAS_AMT
5850                                  | FLAG_HAS_FLASH
5851                                  | FLAG_HAS_JUMBO_FRAMES
5852                                  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
5853                                  | FLAG_APME_IN_WUC,
5854        .flags2                 = FLAG2_HAS_PHY_STATS,
5855        .pba                    = 26,
5856        .max_hw_frame_size      = 4096,
5857        .get_variants           = e1000_get_variants_ich8lan,
5858        .mac_ops                = &ich8_mac_ops,
5859        .phy_ops                = &ich8_phy_ops,
5860        .nvm_ops                = &ich8_nvm_ops,
5861};
5862
5863const struct e1000_info e1000_pch2_info = {
5864        .mac                    = e1000_pch2lan,
5865        .flags                  = FLAG_IS_ICH
5866                                  | FLAG_HAS_WOL
5867                                  | FLAG_HAS_HW_TIMESTAMP
5868                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5869                                  | FLAG_HAS_AMT
5870                                  | FLAG_HAS_FLASH
5871                                  | FLAG_HAS_JUMBO_FRAMES
5872                                  | FLAG_APME_IN_WUC,
5873        .flags2                 = FLAG2_HAS_PHY_STATS
5874                                  | FLAG2_HAS_EEE
5875                                  | FLAG2_CHECK_SYSTIM_OVERFLOW,
5876        .pba                    = 26,
5877        .max_hw_frame_size      = 9022,
5878        .get_variants           = e1000_get_variants_ich8lan,
5879        .mac_ops                = &ich8_mac_ops,
5880        .phy_ops                = &ich8_phy_ops,
5881        .nvm_ops                = &ich8_nvm_ops,
5882};
5883
5884const struct e1000_info e1000_pch_lpt_info = {
5885        .mac                    = e1000_pch_lpt,
5886        .flags                  = FLAG_IS_ICH
5887                                  | FLAG_HAS_WOL
5888                                  | FLAG_HAS_HW_TIMESTAMP
5889                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5890                                  | FLAG_HAS_AMT
5891                                  | FLAG_HAS_FLASH
5892                                  | FLAG_HAS_JUMBO_FRAMES
5893                                  | FLAG_APME_IN_WUC,
5894        .flags2                 = FLAG2_HAS_PHY_STATS
5895                                  | FLAG2_HAS_EEE
5896                                  | FLAG2_CHECK_SYSTIM_OVERFLOW,
5897        .pba                    = 26,
5898        .max_hw_frame_size      = 9022,
5899        .get_variants           = e1000_get_variants_ich8lan,
5900        .mac_ops                = &ich8_mac_ops,
5901        .phy_ops                = &ich8_phy_ops,
5902        .nvm_ops                = &ich8_nvm_ops,
5903};
5904
5905const struct e1000_info e1000_pch_spt_info = {
5906        .mac                    = e1000_pch_spt,
5907        .flags                  = FLAG_IS_ICH
5908                                  | FLAG_HAS_WOL
5909                                  | FLAG_HAS_HW_TIMESTAMP
5910                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5911                                  | FLAG_HAS_AMT
5912                                  | FLAG_HAS_FLASH
5913                                  | FLAG_HAS_JUMBO_FRAMES
5914                                  | FLAG_APME_IN_WUC,
5915        .flags2                 = FLAG2_HAS_PHY_STATS
5916                                  | FLAG2_HAS_EEE,
5917        .pba                    = 26,
5918        .max_hw_frame_size      = 9022,
5919        .get_variants           = e1000_get_variants_ich8lan,
5920        .mac_ops                = &ich8_mac_ops,
5921        .phy_ops                = &ich8_phy_ops,
5922        .nvm_ops                = &spt_nvm_ops,
5923};
5924
5925const struct e1000_info e1000_pch_cnp_info = {
5926        .mac                    = e1000_pch_cnp,
5927        .flags                  = FLAG_IS_ICH
5928                                  | FLAG_HAS_WOL
5929                                  | FLAG_HAS_HW_TIMESTAMP
5930                                  | FLAG_HAS_CTRLEXT_ON_LOAD
5931                                  | FLAG_HAS_AMT
5932                                  | FLAG_HAS_FLASH
5933                                  | FLAG_HAS_JUMBO_FRAMES
5934                                  | FLAG_APME_IN_WUC,
5935        .flags2                 = FLAG2_HAS_PHY_STATS
5936                                  | FLAG2_HAS_EEE,
5937        .pba                    = 26,
5938        .max_hw_frame_size      = 9022,
5939        .get_variants           = e1000_get_variants_ich8lan,
5940        .mac_ops                = &ich8_mac_ops,
5941        .phy_ops                = &ich8_phy_ops,
5942        .nvm_ops                = &spt_nvm_ops,
5943};
5944