linux/drivers/net/ethernet/intel/igc/igc_i225.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/delay.h>

#include "igc_hw.h"

/**
 * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -IGC_ERR_NVM (-1).
 */
static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
{
        return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}

/**
 * igc_release_nvm_i225 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 */
static void igc_release_nvm_i225(struct igc_hw *hw)
{
        igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}

/**
 * igc_get_hw_semaphore_i225 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 */
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
{
        s32 timeout = hw->nvm.word_size + 1;
        s32 i = 0;
        u32 swsm;

        /* Get the SW semaphore */
        while (i < timeout) {
                swsm = rd32(IGC_SWSM);
                if (!(swsm & IGC_SWSM_SMBI))
                        break;

                usleep_range(500, 600);
                i++;
        }

        if (i == timeout) {
                /* In rare circumstances, the SW semaphore may already be held
                 * unintentionally. Clear the semaphore once before giving up.
                 */
                if (hw->dev_spec._base.clear_semaphore_once) {
                        hw->dev_spec._base.clear_semaphore_once = false;
                        igc_put_hw_semaphore(hw);
                        for (i = 0; i < timeout; i++) {
                                swsm = rd32(IGC_SWSM);
                                if (!(swsm & IGC_SWSM_SMBI))
                                        break;

                                usleep_range(500, 600);
                        }
                }

                /* If we do not have the semaphore here, we have to give up. */
                if (i == timeout) {
                        hw_dbg("Driver can't access device - SMBI bit is set.\n");
                        return -IGC_ERR_NVM;
                }
        }

        /* Get the FW semaphore. */
        for (i = 0; i < timeout; i++) {
                swsm = rd32(IGC_SWSM);
                wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI);

                /* Semaphore acquired if bit latched */
                if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI)
                        break;

                usleep_range(500, 600);
        }

        if (i == timeout) {
                /* Release semaphores */
                igc_put_hw_semaphore(hw);
                hw_dbg("Driver can't access the NVM\n");
                return -IGC_ERR_NVM;
        }

        return 0;
}

/**
 * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 * will also specify which port we're acquiring the lock for.
 */
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
        s32 i = 0, timeout = 200;
        u32 fwmask = mask << 16;
        u32 swmask = mask;
        s32 ret_val = 0;
        u32 swfw_sync;

        while (i < timeout) {
                if (igc_get_hw_semaphore_i225(hw)) {
                        ret_val = -IGC_ERR_SWFW_SYNC;
                        goto out;
                }

                swfw_sync = rd32(IGC_SW_FW_SYNC);
                if (!(swfw_sync & (fwmask | swmask)))
                        break;

                /* Firmware currently using resource (fwmask) */
                igc_put_hw_semaphore(hw);
                mdelay(5);
                i++;
        }

        if (i == timeout) {
                hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
                ret_val = -IGC_ERR_SWFW_SYNC;
                goto out;
        }

        swfw_sync |= swmask;
        wr32(IGC_SW_FW_SYNC, swfw_sync);

        igc_put_hw_semaphore(hw);
out:
        return ret_val;
}

/**
 * igc_release_swfw_sync_i225 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 * will also specify which port we're releasing the lock for.
 */
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
        u32 swfw_sync;

        while (igc_get_hw_semaphore_i225(hw))
                ; /* Empty */

        swfw_sync = rd32(IGC_SW_FW_SYNC);
        swfw_sync &= ~mask;
        wr32(IGC_SW_FW_SYNC, swfw_sync);

        igc_put_hw_semaphore(hw);
}
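
/*
 * Illustrative sketch only, not part of the upstream driver: the acquire and
 * release helpers above are meant to bracket the actual EEPROM access so the
 * SW/FW lock is held just long enough for the transaction.  The helper name
 * below is hypothetical; IGC_SWFW_EEP_SM and igc_read_nvm_eerd() are the
 * symbols already used elsewhere in this file.
 */
static s32 __maybe_unused igc_example_locked_eerd_read(struct igc_hw *hw,
                                                       u16 offset, u16 *data)
{
        s32 ret_val;

        /* Take the EEPROM SW/FW semaphore before touching the EERD register */
        ret_val = igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
        if (ret_val)
                return ret_val;

        /* Read a single word, then drop the lock regardless of the result */
        ret_val = igc_read_nvm_eerd(hw, offset, 1, data);
        igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);

        return ret_val;
}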

/**
 * igc_read_nvm_srrd_i225 - Reads Shadow RAM using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the Shadow RAM to read
 * @words: number of words to read
 * @data: word(s) read from the Shadow RAM
 *
 * Reads 16 bit words from the Shadow RAM using the EERD register.
 * Uses necessary synchronization semaphores.
 */
static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
                                  u16 *data)
{
        s32 status = 0;
        u16 i, count;

        /* We cannot hold the synchronization semaphores for too long because
         * of the forceful takeover procedure. However, it is more efficient
         * to read in bursts than to synchronize access for each word.
         */
        for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
                count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
                        IGC_EERD_EEWR_MAX_COUNT : (words - i);

                status = hw->nvm.ops.acquire(hw);
                if (status)
                        break;

                status = igc_read_nvm_eerd(hw, offset, count, data + i);
                hw->nvm.ops.release(hw);
                if (status)
                        break;
        }

        return status;
}
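
/*
 * Worked example for the burst split above, assuming IGC_EERD_EEWR_MAX_COUNT
 * is 512 (its actual value lives in igc_defines.h and is not restated here):
 * a 600-word read is performed as one 512-word burst followed by an 88-word
 * burst, with the semaphore released and re-acquired between the two.
 */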

/**
 * igc_write_nvm_srwr - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to the Shadow RAM at offset using the EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * Shadow RAM will most likely contain an invalid checksum.
 */
static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
                              u16 *data)
{
        struct igc_nvm_info *nvm = &hw->nvm;
        u32 attempts = 100000;
        u32 i, k, eewr = 0;
        s32 ret_val = 0;

        /* A check for invalid values: offset too large, too many words,
         * too many words for the offset, and not enough words.
         */
        if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
            words == 0) {
                hw_dbg("nvm parameter(s) out of bounds\n");
                ret_val = -IGC_ERR_NVM;
                goto out;
        }

        for (i = 0; i < words; i++) {
                eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
                        (data[i] << IGC_NVM_RW_REG_DATA) |
                        IGC_NVM_RW_REG_START;

                wr32(IGC_SRWR, eewr);

                /* Assume failure until the DONE bit latches */
                ret_val = -IGC_ERR_NVM;
                for (k = 0; k < attempts; k++) {
                        if (IGC_NVM_RW_REG_DONE & rd32(IGC_SRWR)) {
                                ret_val = 0;
                                break;
                        }
                        udelay(5);
                }

                if (ret_val) {
                        hw_dbg("Shadow RAM write EEWR timed out\n");
                        break;
                }
        }

out:
        return ret_val;
}
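
/*
 * Illustrative sketch only (hypothetical helper): this composes a single
 * IGC_SRWR command word the same way the loop in igc_write_nvm_srwr() does,
 * using the shift/start macros already referenced above.
 */
static u32 __maybe_unused igc_example_srwr_cmd(u16 offset, u16 word)
{
        /* Word address, the 16-bit payload, and the START bit in one write */
        return ((u32)offset << IGC_NVM_RW_ADDR_SHIFT) |
               ((u32)word << IGC_NVM_RW_REG_DATA) |
               IGC_NVM_RW_REG_START;
}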

/**
 * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to the Shadow RAM at offset using the EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the data
 * will not be committed to FLASH and the Shadow RAM will most likely
 * contain an invalid checksum.
 *
 * If an error code is returned, data and Shadow RAM may be inconsistent -
 * the buffer may be partially written.
 */
static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
                                   u16 *data)
{
        s32 status = 0;
        u16 i, count;

        /* We cannot hold the synchronization semaphores for too long because
         * of the forceful takeover procedure. However, it is more efficient
         * to write in bursts than to synchronize access for each word.
         */
        for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
                count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
                        IGC_EERD_EEWR_MAX_COUNT : (words - i);

                status = hw->nvm.ops.acquire(hw);
                if (status)
                        break;

                status = igc_write_nvm_srwr(hw, offset, count, data + i);
                hw->nvm.ops.release(hw);
                if (status)
                        break;
        }

        return status;
}
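
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver): as
 * the comment above notes, a Shadow RAM write has to be followed by a
 * checksum update (reached here through hw->nvm.ops.update) so the data is
 * committed to flash with a valid checksum.
 */
static s32 __maybe_unused igc_example_write_and_commit(struct igc_hw *hw,
                                                       u16 offset, u16 words,
                                                       u16 *data)
{
        s32 ret_val;

        /* Stage the words in the Shadow RAM first */
        ret_val = igc_write_nvm_srwr_i225(hw, offset, words, data);
        if (ret_val)
                return ret_val;

        /* Recompute the checksum and trigger the flash update */
        return hw->nvm.ops.update(hw);
}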

/**
 * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 */
static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
{
        s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count,
                           u16 *data);
        s32 status = 0;

        status = hw->nvm.ops.acquire(hw);
        if (status)
                goto out;

        /* Temporarily replace the read op, which grabs the semaphore on each
         * call, with one that skips that step, since the semaphore is
         * already held here.
         */
        read_op_ptr = hw->nvm.ops.read;
        hw->nvm.ops.read = igc_read_nvm_eerd;

        status = igc_validate_nvm_checksum(hw);

        /* Revert the original read operation. */
        hw->nvm.ops.read = read_op_ptr;

        hw->nvm.ops.release(hw);

out:
        return status;
}

/**
 * igc_pool_flash_update_done_i225 - Poll for the FLUDONE status bit
 * @hw: pointer to the HW structure
 */
static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
{
        s32 ret_val = -IGC_ERR_NVM;
        u32 i, reg;

        for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
                reg = rd32(IGC_EECD);
                if (reg & IGC_EECD_FLUDONE_I225) {
                        ret_val = 0;
                        break;
                }
                udelay(5);
        }

        return ret_val;
}

/**
 * igc_update_flash_i225 - Commit EEPROM to the flash
 * @hw: pointer to the HW structure
 */
static s32 igc_update_flash_i225(struct igc_hw *hw)
{
        s32 ret_val = 0;
        u32 flup;

        ret_val = igc_pool_flash_update_done_i225(hw);
        if (ret_val == -IGC_ERR_NVM) {
                hw_dbg("Flash update timed out\n");
                goto out;
        }

        flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225;
        wr32(IGC_EECD, flup);

        ret_val = igc_pool_flash_update_done_i225(hw);
        if (ret_val)
                hw_dbg("Flash update timed out\n");
        else
                hw_dbg("Flash update complete\n");

out:
        return ret_val;
}

/**
 * igc_update_nvm_checksum_i225 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum.  Then calculates the EEPROM checksum and writes the
 * value to the EEPROM.  Next, commits the EEPROM data onto the Flash.
 */
static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
{
        u16 checksum = 0;
        s32 ret_val = 0;
        u16 i, nvm_data;

        /* Read the first word from the EEPROM.  If this times out or fails,
         * do not continue or we could be in for a very long wait while every
         * EEPROM read fails.
         */
        ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
        if (ret_val) {
                hw_dbg("EEPROM read failed\n");
                goto out;
        }

        ret_val = hw->nvm.ops.acquire(hw);
        if (ret_val)
                goto out;

        /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
         * because we do not want to take the synchronization
         * semaphores twice here.
         */

        for (i = 0; i < NVM_CHECKSUM_REG; i++) {
                ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
                if (ret_val) {
                        hw->nvm.ops.release(hw);
                        hw_dbg("NVM Read Error while updating checksum.\n");
                        goto out;
                }
                checksum += nvm_data;
        }
        checksum = (u16)NVM_SUM - checksum;
        ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
                                     &checksum);
        if (ret_val) {
                hw->nvm.ops.release(hw);
                hw_dbg("NVM Write Error while updating checksum.\n");
                goto out;
        }

        hw->nvm.ops.release(hw);

        ret_val = igc_update_flash_i225(hw);

out:
        return ret_val;
}
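
/*
 * Illustrative sketch only (hypothetical helper): the invariant that the
 * update above establishes and that igc_validate_nvm_checksum_i225() later
 * checks: the words up to and including NVM_CHECKSUM_REG must sum to
 * NVM_SUM (0xBABA, per the kernel-doc above).
 */
static bool __maybe_unused igc_example_checksum_ok(const u16 *words)
{
        u16 sum = 0;
        u16 i;

        /* Sum the data words plus the stored checksum word itself */
        for (i = 0; i <= NVM_CHECKSUM_REG; i++)
                sum += words[i];

        return sum == (u16)NVM_SUM;
}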

/**
 * igc_get_flash_presence_i225 - Check if flash device is detected
 * @hw: pointer to the HW structure
 */
bool igc_get_flash_presence_i225(struct igc_hw *hw)
{
        bool ret_val = false;
        u32 eec = 0;

        eec = rd32(IGC_EECD);
        if (eec & IGC_EECD_FLASH_DETECTED_I225)
                ret_val = true;

        return ret_val;
}

/**
 * igc_init_nvm_params_i225 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 */
s32 igc_init_nvm_params_i225(struct igc_hw *hw)
{
        struct igc_nvm_info *nvm = &hw->nvm;

        nvm->ops.acquire = igc_acquire_nvm_i225;
        nvm->ops.release = igc_release_nvm_i225;

        /* NVM Function Pointers */
        if (igc_get_flash_presence_i225(hw)) {
                hw->nvm.type = igc_nvm_flash_hw;
                nvm->ops.read = igc_read_nvm_srrd_i225;
                nvm->ops.write = igc_write_nvm_srwr_i225;
                nvm->ops.validate = igc_validate_nvm_checksum_i225;
                nvm->ops.update = igc_update_nvm_checksum_i225;
        } else {
                hw->nvm.type = igc_nvm_invm;
                nvm->ops.read = igc_read_nvm_eerd;
                nvm->ops.write = NULL;
                nvm->ops.validate = NULL;
                nvm->ops.update = NULL;
        }
        return 0;
}
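
/*
 * Illustrative sketch only (hypothetical helper): for iNVM-only parts the
 * write/validate/update pointers are left NULL above, so any caller going
 * through the ops table has to check for a NULL op before using it.
 */
static s32 __maybe_unused igc_example_nvm_write(struct igc_hw *hw, u16 offset,
                                                u16 words, u16 *data)
{
        /* No flash present means no writable NVM through this interface */
        if (!hw->nvm.ops.write)
                return -IGC_ERR_NVM;

        return hw->nvm.ops.write(hw, offset, words, data);
}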