linux/drivers/net/ethernet/intel/igc/igc_i225.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/delay.h>

#include "igc_hw.h"

/**
 * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -IGC_ERR_NVM (-1).
 */
static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
{
        return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}

/**
 * igc_release_nvm_i225 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 */
static void igc_release_nvm_i225(struct igc_hw *hw)
{
        igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}

/**
 * igc_get_hw_semaphore_i225 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 */
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
{
        s32 timeout = hw->nvm.word_size + 1;
        s32 i = 0;
        u32 swsm;

        /* Get the SW semaphore */
        while (i < timeout) {
                swsm = rd32(IGC_SWSM);
                if (!(swsm & IGC_SWSM_SMBI))
                        break;

                usleep_range(500, 600);
                i++;
        }

        if (i == timeout) {
                /* In rare circumstances, the SW semaphore may already be held
                 * unintentionally. Clear the semaphore once before giving up.
                 */
                if (hw->dev_spec._base.clear_semaphore_once) {
                        hw->dev_spec._base.clear_semaphore_once = false;
                        igc_put_hw_semaphore(hw);
                        for (i = 0; i < timeout; i++) {
                                swsm = rd32(IGC_SWSM);
                                if (!(swsm & IGC_SWSM_SMBI))
                                        break;

                                usleep_range(500, 600);
                        }
                }

                /* If we do not have the semaphore here, we have to give up. */
                if (i == timeout) {
                        hw_dbg("Driver can't access device - SMBI bit is set.\n");
                        return -IGC_ERR_NVM;
                }
        }

        /* Get the FW semaphore. */
        for (i = 0; i < timeout; i++) {
                swsm = rd32(IGC_SWSM);
                wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI);

                /* Semaphore acquired if bit latched */
                if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI)
                        break;

                usleep_range(500, 600);
        }

        if (i == timeout) {
                /* Release semaphores */
                igc_put_hw_semaphore(hw);
                hw_dbg("Driver can't access the NVM\n");
                return -IGC_ERR_NVM;
        }

        return 0;
}
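
/* Illustrative sketch (not part of the upstream driver): the two-stage SWSM
 * protocol above is always used in balanced pairs with igc_put_hw_semaphore()
 * from igc_mac.c, which clears both SMBI and SWESMBI. A hypothetical caller
 * protecting SW/FW-shared state would look roughly like this:
 */
static inline s32 igc_hw_semaphore_demo(struct igc_hw *hw)
{
        s32 ret;

        ret = igc_get_hw_semaphore_i225(hw);    /* sets SMBI, then SWESMBI */
        if (ret)
                return ret;

        /* ... access IGC_SW_FW_SYNC or other shared registers here ... */

        igc_put_hw_semaphore(hw);               /* clears SWESMBI and SMBI */
        return 0;
}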

/**
 * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 * will also specify which port we're acquiring the lock for.
 */
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
        s32 i = 0, timeout = 200;
        u32 fwmask = mask << 16;
        u32 swmask = mask;
        s32 ret_val = 0;
        u32 swfw_sync;

        while (i < timeout) {
                if (igc_get_hw_semaphore_i225(hw)) {
                        ret_val = -IGC_ERR_SWFW_SYNC;
                        goto out;
                }

                swfw_sync = rd32(IGC_SW_FW_SYNC);
                if (!(swfw_sync & (fwmask | swmask)))
                        break;

                /* Firmware currently using resource (fwmask) */
                igc_put_hw_semaphore(hw);
                mdelay(5);
                i++;
        }

        if (i == timeout) {
                hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
                ret_val = -IGC_ERR_SWFW_SYNC;
                goto out;
        }

        swfw_sync |= swmask;
        wr32(IGC_SW_FW_SYNC, swfw_sync);

        igc_put_hw_semaphore(hw);
out:
        return ret_val;
}

/**
 * igc_release_swfw_sync_i225 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 * will also specify which port we're releasing the lock for.
 */
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
        u32 swfw_sync;

        while (igc_get_hw_semaphore_i225(hw))
                ; /* Empty */

        swfw_sync = rd32(IGC_SW_FW_SYNC);
        swfw_sync &= ~mask;
        wr32(IGC_SW_FW_SYNC, swfw_sync);

        igc_put_hw_semaphore(hw);
}
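
/* Illustrative sketch (not part of the upstream driver): NVM and PHY accessors
 * bracket their hardware access with this acquire/release pair. Assuming the
 * IGC_SWFW_PHY0_SM mask from igc_defines.h, a hypothetical PHY critical
 * section would look like:
 */
static inline s32 igc_swfw_sync_demo(struct igc_hw *hw)
{
        s32 ret;

        ret = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_PHY0_SM);
        if (ret)
                return ret;     /* firmware held the resource for ~1 sec */

        /* ... PHY register access goes here ... */

        igc_release_swfw_sync_i225(hw, IGC_SWFW_PHY0_SM);
        return 0;
}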

/**
 * igc_read_nvm_srrd_i225 - Reads Shadow RAM using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the Shadow RAM to read
 * @words: number of words to read
 * @data: word(s) read from the Shadow RAM
 *
 * Reads 16 bit words from the Shadow RAM using the EERD register.
 * Uses necessary synchronization semaphores.
 */
static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
                                  u16 *data)
{
        s32 status = 0;
        u16 i, count;

        /* We cannot hold the synchronization semaphores for too long because
         * of the forceful takeover procedure. However, it is more efficient
         * to read in bursts than to synchronize access for each word.
         */
        for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
                /* count = min(words - i, IGC_EERD_EEWR_MAX_COUNT) */
                count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
                        IGC_EERD_EEWR_MAX_COUNT : (words - i);

                status = hw->nvm.ops.acquire(hw);
                if (status)
                        break;

                status = igc_read_nvm_eerd(hw, offset + i, count, data + i);
                hw->nvm.ops.release(hw);
                if (status)
                        break;
        }

        return status;
}
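
/* Illustrative sketch (not part of the upstream driver): assuming a burst
 * limit of IGC_EERD_EEWR_MAX_COUNT (512) words, a 700-word read is split into
 * one 512-word burst and one 188-word burst, releasing the semaphores in
 * between. A hypothetical caller fetching the three MAC-address words at
 * offset 0 would look like:
 */
static inline s32 igc_read_mac_words_demo(struct igc_hw *hw, u16 *mac_words)
{
        /* words 0x00..0x02 hold the permanent MAC address, 16 bits each */
        return igc_read_nvm_srrd_i225(hw, 0, 3, mac_words);
}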

/**
 * igc_write_nvm_srwr - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to the Shadow RAM at offset using the EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * Shadow RAM will most likely contain an invalid checksum.
 */
static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
                              u16 *data)
{
        struct igc_nvm_info *nvm = &hw->nvm;
        s32 ret_val = -IGC_ERR_NVM;
        u32 attempts = 100000;
        u32 i, k, eewr = 0;

        /* A check for invalid values: offset too large, too many words,
         * too many words for the offset, and not enough words.
         */
        if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
            words == 0) {
                hw_dbg("nvm parameter(s) out of bounds\n");
                return ret_val;
        }

        for (i = 0; i < words; i++) {
                ret_val = -IGC_ERR_NVM;
                eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
                        (data[i] << IGC_NVM_RW_REG_DATA) |
                        IGC_NVM_RW_REG_START;

                wr32(IGC_SRWR, eewr);

                for (k = 0; k < attempts; k++) {
                        if (IGC_NVM_RW_REG_DONE & rd32(IGC_SRWR)) {
                                ret_val = 0;
                                break;
                        }
                        udelay(5);
                }

                if (ret_val) {
                        hw_dbg("Shadow RAM write EEWR timed out\n");
                        break;
                }
        }

        return ret_val;
}
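
/* Worked example (illustrative): assuming IGC_NVM_RW_ADDR_SHIFT = 2,
 * IGC_NVM_RW_REG_DATA = 16 and IGC_NVM_RW_REG_START = 1 as in igc_defines.h,
 * writing data 0x1234 to word offset 0x3F encodes as
 *
 *      eewr = (0x3F << 2) | (0x1234 << 16) | 1 = 0x123400FD
 *
 * and the inner loop then polls IGC_SRWR for IGC_NVM_RW_REG_DONE, i.e. for up
 * to 100000 * 5 usec = 500 msec per word.
 */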

/**
 * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to the Shadow RAM at offset using the EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * data will not be committed to FLASH and also the Shadow RAM will most
 * likely contain an invalid checksum.
 *
 * If an error code is returned, data and Shadow RAM may be inconsistent -
 * the buffer may have been partially written.
 */
static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
                                   u16 *data)
{
        s32 status = 0;
        u16 i, count;

        /* We cannot hold the synchronization semaphores for too long because
         * of the forceful takeover procedure. However, it is more efficient
         * to write in bursts than to synchronize access for each word.
         */
        for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
                /* count = min(words - i, IGC_EERD_EEWR_MAX_COUNT) */
                count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
                        IGC_EERD_EEWR_MAX_COUNT : (words - i);

                status = hw->nvm.ops.acquire(hw);
                if (status)
                        break;

                status = igc_write_nvm_srwr(hw, offset + i, count, data + i);
                hw->nvm.ops.release(hw);
                if (status)
                        break;
        }

        return status;
}
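
/* Illustrative sketch (not part of the upstream driver): a hypothetical
 * caller updating one Shadow RAM word through this path, then revalidating
 * via the checksum update routine below:
 */
static inline s32 igc_write_one_word_demo(struct igc_hw *hw, u16 off, u16 val)
{
        s32 ret;

        ret = igc_write_nvm_srwr_i225(hw, off, 1, &val);
        if (ret)
                return ret;

        /* Recompute the checksum word and commit the Shadow RAM to flash
         * (ops.update is igc_update_nvm_checksum_i225 on flash-backed parts).
         */
        return hw->nvm.ops.update(hw);
}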

/**
 * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 */
static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
{
        s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count,
                           u16 *data);
        s32 status = 0;

        status = hw->nvm.ops.acquire(hw);
        if (status)
                goto out;

        /* Temporarily replace the read op that grabs the semaphore with one
         * that skips it, since we already hold the semaphore here.
         */
        read_op_ptr = hw->nvm.ops.read;
        hw->nvm.ops.read = igc_read_nvm_eerd;

        status = igc_validate_nvm_checksum(hw);

        /* Revert the original read operation. */
        hw->nvm.ops.read = read_op_ptr;

        hw->nvm.ops.release(hw);

out:
        return status;
}

/**
 * igc_pool_flash_update_done_i225 - Poll FLUDONE status
 * @hw: pointer to the HW structure
 */
static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
{
        s32 ret_val = -IGC_ERR_NVM;
        u32 i, reg;

        for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
                reg = rd32(IGC_EECD);
                if (reg & IGC_EECD_FLUDONE_I225) {
                        ret_val = 0;
                        break;
                }
                udelay(5);
        }

        return ret_val;
}
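
/* Timing note (illustrative): assuming IGC_FLUDONE_ATTEMPTS is 20000 as in
 * igc_defines.h, the poll above waits at most 20000 * 5 usec = 100 msec for
 * the flash update done bit before reporting -IGC_ERR_NVM.
 */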

/**
 * igc_update_flash_i225 - Commit EEPROM to the flash
 * @hw: pointer to the HW structure
 */
static s32 igc_update_flash_i225(struct igc_hw *hw)
{
        s32 ret_val = 0;
        u32 flup;

        ret_val = igc_pool_flash_update_done_i225(hw);
        if (ret_val == -IGC_ERR_NVM) {
                hw_dbg("Flash update time out\n");
                goto out;
        }

        flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225;
        wr32(IGC_EECD, flup);

        ret_val = igc_pool_flash_update_done_i225(hw);
        if (ret_val)
                hw_dbg("Flash update time out\n");
        else
                hw_dbg("Flash update complete\n");

out:
        return ret_val;
}

/**
 * igc_update_nvm_checksum_i225 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum.  Then calculates the EEPROM checksum, writes the
 * value to the EEPROM, and finally commits the EEPROM data to the flash.
 */
static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
{
        u16 checksum = 0;
        s32 ret_val = 0;
        u16 i, nvm_data;

        /* Read the first word from the EEPROM. If this times out or fails, do
         * not continue or we could be in for a very long wait while every
         * EEPROM read fails.
         */
        ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
        if (ret_val) {
                hw_dbg("EEPROM read failed\n");
                goto out;
        }

        ret_val = hw->nvm.ops.acquire(hw);
        if (ret_val)
                goto out;

        /* Do not use hw->nvm.ops.write, hw->nvm.ops.read here, because we
         * do not want to take the synchronization semaphores twice.
         */

        for (i = 0; i < NVM_CHECKSUM_REG; i++) {
                ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
                if (ret_val) {
                        hw->nvm.ops.release(hw);
                        hw_dbg("NVM Read Error while updating checksum.\n");
                        goto out;
                }
                checksum += nvm_data;
        }
        checksum = (u16)NVM_SUM - checksum;
        ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, &checksum);
        if (ret_val) {
                hw->nvm.ops.release(hw);
                hw_dbg("NVM Write Error while updating checksum.\n");
                goto out;
        }

        hw->nvm.ops.release(hw);

        ret_val = igc_update_flash_i225(hw);

out:
        return ret_val;
}
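
/* Worked example (illustrative): assuming NVM_SUM is 0xBABA and
 * NVM_CHECKSUM_REG is 0x3F as in igc_defines.h, if words 0x00..0x3E sum to
 * 0xA000 the routine above stores
 *
 *      checksum = 0xBABA - 0xA000 = 0x1ABA
 *
 * at word 0x3F, so that the full 0x40-word sum is again 0xBABA (mod 2^16),
 * which is exactly what igc_validate_nvm_checksum() verifies.
 */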

/**
 * igc_get_flash_presence_i225 - Check if flash device is detected
 * @hw: pointer to the HW structure
 */
bool igc_get_flash_presence_i225(struct igc_hw *hw)
{
        bool ret_val = false;
        u32 eec = 0;

        eec = rd32(IGC_EECD);
        if (eec & IGC_EECD_FLASH_DETECTED_I225)
                ret_val = true;

        return ret_val;
}

/**
 * igc_init_nvm_params_i225 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 */
s32 igc_init_nvm_params_i225(struct igc_hw *hw)
{
        struct igc_nvm_info *nvm = &hw->nvm;

        nvm->ops.acquire = igc_acquire_nvm_i225;
        nvm->ops.release = igc_release_nvm_i225;

        /* NVM Function Pointers */
        if (igc_get_flash_presence_i225(hw)) {
                hw->nvm.type = igc_nvm_flash_hw;
                nvm->ops.read = igc_read_nvm_srrd_i225;
                nvm->ops.write = igc_write_nvm_srwr_i225;
                nvm->ops.validate = igc_validate_nvm_checksum_i225;
                nvm->ops.update = igc_update_nvm_checksum_i225;
        } else {
                hw->nvm.type = igc_nvm_invm;
                nvm->ops.read = igc_read_nvm_eerd;
                nvm->ops.write = NULL;
                nvm->ops.validate = NULL;
                nvm->ops.update = NULL;
        }
        return 0;
}
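
/* Illustrative sketch (not part of the upstream driver): on flash-less (iNVM)
 * parts, write/validate/update are left NULL above, so generic callers must
 * test the ops before dispatching, roughly like:
 */
static inline s32 igc_nvm_update_demo(struct igc_hw *hw)
{
        if (!hw->nvm.ops.update)        /* iNVM parts cannot be rewritten */
                return -IGC_ERR_NVM;

        return hw->nvm.ops.update(hw);
}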

/**
 * igc_set_eee_i225 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 * @adv2p5G: boolean flag enabling 2.5G EEE advertisement
 * @adv1G: boolean flag enabling 1G EEE advertisement
 * @adv100M: boolean flag enabling 100M EEE advertisement
 *
 * Enable/disable EEE based on the setting in the dev_spec structure.
 */
s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
                     bool adv100M)
{
        u32 ipcnfg, eeer;

        ipcnfg = rd32(IGC_IPCNFG);
        eeer = rd32(IGC_EEER);

        /* enable or disable per user setting */
        if (hw->dev_spec._base.eee_enable) {
                u32 eee_su = rd32(IGC_EEE_SU);

                if (adv100M)
                        ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
                else
                        ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;

                if (adv1G)
                        ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
                else
                        ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;

                if (adv2p5G)
                        ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
                else
                        ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;

                eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
                         IGC_EEER_LPI_FC);

                /* This bit should not be set in normal operation. */
                if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
                        hw_dbg("LPI Clock Stop Bit should not be set!\n");
        } else {
                ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
                            IGC_IPCNFG_EEE_100M_AN);
                eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
                          IGC_EEER_LPI_FC);
        }
        wr32(IGC_IPCNFG, ipcnfg);
        wr32(IGC_EEER, eeer);
        rd32(IGC_IPCNFG);
        rd32(IGC_EEER);

        return IGC_SUCCESS;
}
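
/* Usage note (illustrative): a caller would typically derive the three
 * advertisement flags from the user's ethtool EEE configuration, e.g.
 *
 *      igc_set_eee_i225(hw, true, true, true);
 *
 * advertises EEE at 2.5G, 1G and 100M, while hw->dev_spec._base.eee_enable
 * gates whether LPI is enabled at all. The trailing rd32() calls above flush
 * the posted register writes.
 */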

/**
 * igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds
 * @hw: pointer to the HW structure
 * @link: bool indicating link status
 *
 * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
 * settings, otherwise specify that there is no LTR requirement.
 */
s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
{
        u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
        u16 speed, duplex;
        s32 size;

        /* If we do not have link, LTR thresholds are zero. */
        if (link) {
                hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);

                /* An EEE-based threshold applies only when EEE is enabled
                 * and the link speed is not 10 Mbps.
                 */
                if (hw->dev_spec._base.eee_enable &&
                    speed != SPEED_10) {
                        /* EEE enabled, so send LTRMAX threshold. */
                        ltrc = rd32(IGC_LTRC) |
                               IGC_LTRC_EEEMS_EN;
                        wr32(IGC_LTRC, ltrc);

                        /* Calculate tw_system (nsec). */
                        if (speed == SPEED_100) {
                                tw_system = ((rd32(IGC_EEE_SU) &
                                             IGC_TW_SYSTEM_100_MASK) >>
                                             IGC_TW_SYSTEM_100_SHIFT) * 500;
                        } else {
                                tw_system = (rd32(IGC_EEE_SU) &
                                             IGC_TW_SYSTEM_1000_MASK) * 500;
                        }
                } else {
                        tw_system = 0;
                }

                /* Get the Rx packet buffer size. */
                size = rd32(IGC_RXPBS) &
                       IGC_RXPBS_SIZE_I225_MASK;

                /* Calculations vary based on DMAC settings. */
                if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) {
                        size -= (rd32(IGC_DMACR) &
                                 IGC_DMACR_DMACTHR_MASK) >>
                                 IGC_DMACR_DMACTHR_SHIFT;
                        /* Convert size to bits. */
                        size *= 1024 * 8;
                } else {
                        /* Convert size to bits. */
                        size *= 1024;
                        size *= 8;
                }

                if (size < 0) {
                        hw_dbg("Invalid effective Rx buffer size %d\n",
                               size);
                        return -IGC_ERR_CONFIG;
                }

                /* Calculate the thresholds. Since speed is in Mbps, simplify
                 * the calculation by multiplying size/speed by 1000 for result
                 * to be in nsec before dividing by the scale in nsec. Set the
                 * scale such that the LTR threshold fits in the register.
                 */
                ltr_min = (1000 * size) / speed;
                ltr_max = ltr_min + tw_system;
                scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
                            IGC_LTRMINV_SCALE_32768;
                scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
                            IGC_LTRMAXV_SCALE_32768;
                ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
                ltr_min -= 1;
                ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;
                ltr_max -= 1;

                /* Only write the LTR thresholds if they differ from before. */
                ltrv = rd32(IGC_LTRMINV);
                if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
                        ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
                               (scale_min << IGC_LTRMINV_SCALE_SHIFT);
                        wr32(IGC_LTRMINV, ltrv);
                }

                ltrv = rd32(IGC_LTRMAXV);
                if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
                        ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
                               (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
                        wr32(IGC_LTRMAXV, ltrv);
                }
        }

        return IGC_SUCCESS;
}
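
/* Worked example (illustrative): on a 1000 Mbps link with a 32 KB Rx packet
 * buffer and DMAC disabled,
 *
 *      size    = 32 * 1024 * 8        = 262144 bits
 *      ltr_min = 1000 * 262144 / 1000 = 262144 nsec
 *
 * 262144 / 1024 = 256 < 1024, so the 1024-nsec scale is chosen and the
 * programmed value is 262144 / 1024 - 1 = 255. With EEE enabled, ltr_max adds
 * tw_system (the EEE wake time in nsec) on top before the same scaling.
 */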