linux/drivers/net/ethernet/intel/i40e/i40e_nvm.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 2013 - 2018 Intel Corporation. */
   3
   4#include "i40e_prototype.h"
   5
   6/**
   7 * i40e_init_nvm - Initialize the NVM info structure
   8 * @hw: pointer to the HW structure
   9 *
  10 * Set up the NVM info structure. Should be called once per NVM
  11 * initialization, e.g. inside i40e_init_shared_code().
  12 * Note that the term NVM is used here (and in all functions in this file)
  13 * as an equivalent of the Flash part mapped into the Shadow RAM (SR);
  14 * the Flash is always accessed through the Shadow RAM.
  15 **/
  16i40e_status i40e_init_nvm(struct i40e_hw *hw)
  17{
  18        struct i40e_nvm_info *nvm = &hw->nvm;
  19        i40e_status ret_code = 0;
  20        u32 fla, gens;
  21        u8 sr_size;
  22
  23        /* The SR size is stored regardless of the nvm programming mode
  24         * as the blank mode may be used in the factory line.
  25         */
  26        gens = rd32(hw, I40E_GLNVM_GENS);
  27        sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
  28                           I40E_GLNVM_GENS_SR_SIZE_SHIFT);
  29        /* Convert to words (the register encodes the SR size as 2^sr_size KB) */
  30        nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
  31
  32        /* Check if we are in the normal or blank NVM programming mode */
  33        fla = rd32(hw, I40E_GLNVM_FLA);
  34        if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
  35                /* Max NVM timeout */
  36                nvm->timeout = I40E_MAX_NVM_TIMEOUT;
  37                nvm->blank_nvm_mode = false;
  38        } else { /* Blank programming mode */
  39                nvm->blank_nvm_mode = true;
  40                ret_code = I40E_ERR_NVM_BLANK_MODE;
  41                i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
  42        }
  43
  44        return ret_code;
  45}
  46
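    /*
     * Illustrative note (not part of the driver): the GLNVM_GENS SR_SIZE
     * field encodes the Shadow RAM size as a power of two in KB.  Assuming
     * I40E_SR_WORDS_IN_1KB is 512 (1KB expressed in 16-bit words), a field
     * value of 5 gives BIT(5) * 512 = 32 * 512 = 16384 words, i.e. a 32KB
     * Shadow RAM.
     */
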
  47/**
  48 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
  49 * @hw: pointer to the HW structure
  50 * @access: NVM access type (read or write)
  51 *
  52 * This function will request NVM ownership, for reading or writing,
  53 * via the proper Admin Command.
  54 **/
  55i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
  56                                       enum i40e_aq_resource_access_type access)
  57{
  58        i40e_status ret_code = 0;
  59        u64 gtime, timeout;
  60        u64 time_left = 0;
  61
  62        if (hw->nvm.blank_nvm_mode)
  63                goto i40e_acquire_nvm_exit;
  64
  65        ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
  66                                            0, &time_left, NULL);
  67        /* Reading the Global Device Timer */
  68        gtime = rd32(hw, I40E_GLVFGEN_TIMER);
  69
  70        /* Store the timeout */
  71        hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
  72
  73        if (ret_code)
  74                i40e_debug(hw, I40E_DEBUG_NVM,
  75                           "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
  76                           access, time_left, ret_code, hw->aq.asq_last_status);
  77
  78        if (ret_code && time_left) {
  79                /* Poll until the current NVM owner times out */
  80                timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
  81                while ((gtime < timeout) && time_left) {
  82                        usleep_range(10000, 20000);
  83                        gtime = rd32(hw, I40E_GLVFGEN_TIMER);
  84                        ret_code = i40e_aq_request_resource(hw,
  85                                                        I40E_NVM_RESOURCE_ID,
  86                                                        access, 0, &time_left,
  87                                                        NULL);
  88                        if (!ret_code) {
  89                                hw->nvm.hw_semaphore_timeout =
  90                                            I40E_MS_TO_GTIME(time_left) + gtime;
  91                                break;
  92                        }
  93                }
  94                if (ret_code) {
  95                        hw->nvm.hw_semaphore_timeout = 0;
  96                        i40e_debug(hw, I40E_DEBUG_NVM,
  97                                   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
  98                                   time_left, ret_code, hw->aq.asq_last_status);
  99                }
 100        }
 101
 102i40e_acquire_nvm_exit:
 103        return ret_code;
 104}
 105
 106/**
 107 * i40e_release_nvm - Generic request for releasing the NVM ownership
 108 * @hw: pointer to the HW structure
 109 *
 110 * This function will release NVM resource via the proper Admin Command.
 111 **/
 112void i40e_release_nvm(struct i40e_hw *hw)
 113{
 114        i40e_status ret_code = I40E_SUCCESS;
 115        u32 total_delay = 0;
 116
 117        if (hw->nvm.blank_nvm_mode)
 118                return;
 119
 120        ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
 121
 122        /* In some rare cases, trying to release the resource results in an
 123         * admin queue timeout, so retry until the AQ command timeout elapses.
 124         */
 125        while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
 126               (total_delay < hw->aq.asq_cmd_timeout)) {
 127                usleep_range(1000, 2000);
 128                ret_code = i40e_aq_release_resource(hw,
 129                                                    I40E_NVM_RESOURCE_ID,
 130                                                    0, NULL);
 131                total_delay++;
 132        }
 133}
 134
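    /*
     * Illustrative sketch (not part of the driver): the acquire/read/release
     * pattern used around raw Shadow RAM accesses, assuming the caller
     * already has a valid struct i40e_hw.  Callers of i40e_read_nvm_word()
     * get this handling automatically when
     * I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK is set.
     *
     *        u16 word;
     *        i40e_status status;
     *
     *        status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
     *        if (status)
     *                return status;
     *        status = __i40e_read_nvm_word(hw, offset, &word);
     *        i40e_release_nvm(hw);
     */
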
 135/**
 136 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 137 * @hw: pointer to the HW structure
 138 *
 139 * Polls the SRCTL Shadow RAM register done bit.
 140 **/
 141static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 142{
 143        i40e_status ret_code = I40E_ERR_TIMEOUT;
 144        u32 srctl, wait_cnt;
 145
 146        /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
 147        for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
 148                srctl = rd32(hw, I40E_GLNVM_SRCTL);
 149                if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
 150                        ret_code = 0;
 151                        break;
 152                }
 153                udelay(5);
 154        }
 155        if (ret_code == I40E_ERR_TIMEOUT)
 156                i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set\n");
 157        return ret_code;
 158}
 159
 160/**
 161 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 162 * @hw: pointer to the HW structure
 163 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 164 * @data: word read from the Shadow RAM
 165 *
 166 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 167 **/
 168static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
 169                                            u16 *data)
 170{
 171        i40e_status ret_code = I40E_ERR_TIMEOUT;
 172        u32 sr_reg;
 173
 174        if (offset >= hw->nvm.sr_size) {
 175                i40e_debug(hw, I40E_DEBUG_NVM,
 176                           "NVM read error: offset %d beyond Shadow RAM limit %d\n",
 177                           offset, hw->nvm.sr_size);
 178                ret_code = I40E_ERR_PARAM;
 179                goto read_nvm_exit;
 180        }
 181
 182        /* Poll the done bit first */
 183        ret_code = i40e_poll_sr_srctl_done_bit(hw);
 184        if (!ret_code) {
 185                /* Write the address and start reading */
 186                sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
 187                         BIT(I40E_GLNVM_SRCTL_START_SHIFT);
 188                wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
 189
 190                /* Poll I40E_GLNVM_SRCTL until the done bit is set */
 191                ret_code = i40e_poll_sr_srctl_done_bit(hw);
 192                if (!ret_code) {
 193                        sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
 194                        *data = (u16)((sr_reg &
 195                                       I40E_GLNVM_SRDATA_RDDATA_MASK)
 196                                    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
 197                }
 198        }
 199        if (ret_code)
 200                i40e_debug(hw, I40E_DEBUG_NVM,
 201                           "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
 202                           offset);
 203
 204read_nvm_exit:
 205        return ret_code;
 206}
 207
 208/**
 209 * i40e_read_nvm_aq - Read Shadow RAM.
 210 * @hw: pointer to the HW structure.
 211 * @module_pointer: module pointer location in words from the NVM beginning
 212 * @offset: offset in words from module start
 213 * @words: number of words to read
 214 * @data: buffer for the words read from the Shadow RAM
 215 * @last_command: tells the AdminQ that this is the last command
 216 *
 217 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 218 **/
 219static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
 220                                    u8 module_pointer, u32 offset,
 221                                    u16 words, void *data,
 222                                    bool last_command)
 223{
 224        i40e_status ret_code = I40E_ERR_NVM;
 225        struct i40e_asq_cmd_details cmd_details;
 226
 227        memset(&cmd_details, 0, sizeof(cmd_details));
 228        cmd_details.wb_desc = &hw->nvm_wb_desc;
 229
 230        /* Here we are checking the SR limit only for the flat memory model.
 231         * We cannot do it for the module-based model, as we did not acquire
 232         * the NVM resource yet (we cannot get the module pointer value).
 233         * Firmware will check the module-based model.
 234         */
 235        if ((offset + words) > hw->nvm.sr_size)
 236                i40e_debug(hw, I40E_DEBUG_NVM,
 237                           "NVM read error: offset %d beyond Shadow RAM limit %d\n",
 238                           (offset + words), hw->nvm.sr_size);
 239        else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
 240                /* We can read only up to 4KB (one sector) in one AQ read */
 241                i40e_debug(hw, I40E_DEBUG_NVM,
 242                           "NVM read error: tried to read %d words, limit is %d.\n",
 243                           words, I40E_SR_SECTOR_SIZE_IN_WORDS);
 244        else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
 245                 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
 246                /* A single read cannot span two sectors */
 247                i40e_debug(hw, I40E_DEBUG_NVM,
 248                           "NVM read error: cannot span two sectors in a single read offset=%d words=%d\n",
 249                           offset, words);
 250        else
 251                ret_code = i40e_aq_read_nvm(hw, module_pointer,
 252                                            2 * offset,  /*bytes*/
 253                                            2 * words,   /*bytes*/
 254                                            data, last_command, &cmd_details);
 255
 256        return ret_code;
 257}
 258
 259/**
 260 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 261 * @hw: pointer to the HW structure
 262 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 263 * @data: word read from the Shadow RAM
 264 *
 265 * Reads one 16 bit word from the Shadow RAM using the AdminQ
 266 **/
 267static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
 268                                         u16 *data)
 269{
 270        i40e_status ret_code = I40E_ERR_TIMEOUT;
 271
 272        ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
 273        *data = le16_to_cpu(*(__le16 *)data);
 274
 275        return ret_code;
 276}
 277
 278/**
 279 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
 280 * @hw: pointer to the HW structure
 281 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 282 * @data: word read from the Shadow RAM
 283 *
 284 * Reads one 16 bit word from the Shadow RAM.
 285 *
 286 * Do not use this function except in cases where the nvm lock is already
 287 * taken via i40e_acquire_nvm().
 288 **/
 289static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
 290                                        u16 offset, u16 *data)
 291{
 292        if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
 293                return i40e_read_nvm_word_aq(hw, offset, data);
 294
 295        return i40e_read_nvm_word_srctl(hw, offset, data);
 296}
 297
 298/**
 299 * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
 300 * @hw: pointer to the HW structure
 301 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 302 * @data: word read from the Shadow RAM
 303 *
 304 * Reads one 16 bit word from the Shadow RAM.
 305 **/
 306i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 307                               u16 *data)
 308{
 309        i40e_status ret_code = 0;
 310
 311        if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
 312                ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 313        if (ret_code)
 314                return ret_code;
 315
 316        ret_code = __i40e_read_nvm_word(hw, offset, data);
 317
 318        if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
 319                i40e_release_nvm(hw);
 320
 321        return ret_code;
 322}
 323
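    /*
     * Illustrative sketch (not part of the driver): reading a single Shadow
     * RAM word, here the software checksum word that is also used later in
     * this file.
     *
     *        u16 checksum_sr = 0;
     *
     *        if (!i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD,
     *                                &checksum_sr))
     *                i40e_debug(hw, I40E_DEBUG_NVM,
     *                           "SR checksum word: 0x%04x\n", checksum_sr);
     */
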
 324/**
 325 * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
 326 * @hw: Pointer to the HW structure
 327 * @module_ptr: Pointer to module in words with respect to NVM beginning
 328 * @module_offset: Offset in words from module start
 329 * @data_offset: Offset in words from reading data area start
 330 * @words_data_size: Words to read from NVM
 331 * @data_ptr: Pointer to memory location where resulting buffer will be stored
 332 **/
 333enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
 334                                                u8 module_ptr,
 335                                                u16 module_offset,
 336                                                u16 data_offset,
 337                                                u16 words_data_size,
 338                                                u16 *data_ptr)
 339{
 340        i40e_status status;
 341        u16 specific_ptr = 0;
 342        u16 ptr_value = 0;
 343        u32 offset = 0;
 344
 345        if (module_ptr != 0) {
 346                status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
 347                if (status) {
 348                        i40e_debug(hw, I40E_DEBUG_ALL,
 349                                   "Reading nvm word failed. Error code: %d.\n",
 350                                   status);
 351                        return I40E_ERR_NVM;
 352                }
 353        }
 354#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
 355#define I40E_NVM_INVALID_VAL 0xFFFF
 356
 357        /* Pointer not initialized */
 358        if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
 359            ptr_value == I40E_NVM_INVALID_VAL) {
 360                i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
 361                return I40E_ERR_BAD_PTR;
 362        }
 363
 364        /* Check whether the module is in the SR mapped area or outside */
 365        if (ptr_value & I40E_PTR_TYPE) {
 366                /* Pointer points outside of the Shadow RAM mapped area */
 367                i40e_debug(hw, I40E_DEBUG_ALL,
 368                           "Reading nvm data failed. Pointer points outside of the Shadow RAM mapped area.\n");
 369
 370                return I40E_ERR_PARAM;
 371        } else {
 372                /* Read from the Shadow RAM */
 373
 374                status = i40e_read_nvm_word(hw, ptr_value + module_offset,
 375                                            &specific_ptr);
 376                if (status) {
 377                        i40e_debug(hw, I40E_DEBUG_ALL,
 378                                   "Reading nvm word failed. Error code: %d.\n",
 379                                   status);
 380                        return I40E_ERR_NVM;
 381                }
 382
 383                offset = ptr_value + module_offset + specific_ptr +
 384                        data_offset;
 385
 386                status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
 387                                              data_ptr);
 388                if (status) {
 389                        i40e_debug(hw, I40E_DEBUG_ALL,
 390                                   "Reading nvm buffer failed. Error code: %d.\n",
 391                                   status);
 392                }
 393        }
 394
 395        return status;
 396}
 397
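    /*
     * Illustrative note (not part of the driver): i40e_read_nvm_module_data()
     * above follows a chain of pointers stored in the Shadow RAM.  The word
     * at @module_ptr gives the module base, the word at
     * (base + @module_offset) gives a sub-module pointer, and the final
     * buffer is read starting at
     * base + @module_offset + sub-module pointer + @data_offset.
     */
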
 398/**
 399 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 400 * @hw: pointer to the HW structure
 401 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 402 * @words: (in) number of words to read; (out) number of words actually read
 403 * @data: words read from the Shadow RAM
 404 *
 405 * Reads 16 bit words (data buffer) from the SR, one word at a time, using
 406 * the i40e_read_nvm_word_srctl() method. The caller is responsible for any
 407 * NVM ownership handling that may be required.
 408 **/
 409static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
 410                                              u16 *words, u16 *data)
 411{
 412        i40e_status ret_code = 0;
 413        u16 index, word;
 414
 415        /* Loop through the selected region */
 416        for (word = 0; word < *words; word++) {
 417                index = offset + word;
 418                ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
 419                if (ret_code)
 420                        break;
 421        }
 422
 423        /* Update the number of words read from the Shadow RAM */
 424        *words = word;
 425
 426        return ret_code;
 427}
 428
 429/**
 430 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 431 * @hw: pointer to the HW structure
 432 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 433 * @words: (in) number of words to read; (out) number of words actually read
 434 * @data: words read from the Shadow RAM
 435 *
 436 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 437 * method. The caller must already hold NVM ownership; acquisition and
 438 * release are handled by i40e_read_nvm_buffer().
 439 **/
 440static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
 441                                           u16 *words, u16 *data)
 442{
 443        i40e_status ret_code;
 444        u16 read_size;
 445        bool last_cmd = false;
 446        u16 words_read = 0;
 447        u16 i = 0;
 448
 449        do {
 450                /* Calculate the number of words to read in this step.
 451                 * The FVL AQ does not allow reading more than one page at a
 452                 * time or crossing page boundaries.
 453                 */
 454                if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
 455                        read_size = min(*words,
 456                                        (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
 457                                      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
 458                else
 459                        read_size = min((*words - words_read),
 460                                        I40E_SR_SECTOR_SIZE_IN_WORDS);
 461
 462                /* Check if this is the last command; if so, set the flag */
 463                if ((words_read + read_size) >= *words)
 464                        last_cmd = true;
 465
 466                ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
 467                                            data + words_read, last_cmd);
 468                if (ret_code)
 469                        goto read_nvm_buffer_aq_exit;
 470
 471                /* Increment counter for words already read and move offset to
 472                 * new read location
 473                 */
 474                words_read += read_size;
 475                offset += read_size;
 476        } while (words_read < *words);
 477
 478        for (i = 0; i < *words; i++)
 479                data[i] = le16_to_cpu(((__le16 *)data)[i]);
 480
 481read_nvm_buffer_aq_exit:
 482        *words = words_read;
 483        return ret_code;
 484}
 485
 486/**
 487 * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
 488 * @hw: pointer to the HW structure
 489 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 490 * @words: (in) number of words to read; (out) number of words actually read
 491 * @data: words read from the Shadow RAM
 492 *
 493 * Reads 16 bit words (data buffer) from the SR via the AdminQ or the SRCTL
 494 * register, depending on the hw flags.
 495 **/
 496static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
 497                                          u16 offset, u16 *words,
 498                                          u16 *data)
 499{
 500        if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
 501                return i40e_read_nvm_buffer_aq(hw, offset, words, data);
 502
 503        return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
 504}
 505
 506/**
 507 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
 508 * @hw: pointer to the HW structure
 509 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 510 * @words: (in) number of words to read; (out) number of words actually read
 511 * @data: words read from the Shadow RAM
 512 *
 513 * Reads 16 bit words (data buffer) from the SR. When the AdminQ access path
 514 * is used, the buffer read is preceded by the NVM ownership take and
 515 * followed by the release.
 516 **/
 517i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 518                                 u16 *words, u16 *data)
 519{
 520        i40e_status ret_code = 0;
 521
 522        if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
 523                ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 524                if (!ret_code) {
 525                        ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
 526                                                           data);
 527                        i40e_release_nvm(hw);
 528                }
 529        } else {
 530                ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
 531        }
 532
 533        return ret_code;
 534}
 535
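    /*
     * Illustrative sketch (not part of the driver): reading a 16-word block
     * from the start of the Shadow RAM.  On return, words holds the number
     * of words actually read, which may be smaller than requested on error.
     *
     *        u16 buf[16];
     *        u16 words = ARRAY_SIZE(buf);
     *        i40e_status status;
     *
     *        status = i40e_read_nvm_buffer(hw, 0, &words, buf);
     *        if (status)
     *                i40e_debug(hw, I40E_DEBUG_NVM,
     *                           "NVM buffer read failed after %u words\n",
     *                           words);
     */
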
 536/**
 537 * i40e_write_nvm_aq - Writes Shadow RAM.
 538 * @hw: pointer to the HW structure.
 539 * @module_pointer: module pointer location in words from the NVM beginning
 540 * @offset: offset in words from module start
 541 * @words: number of words to write
 542 * @data: buffer with words to write to the Shadow RAM
 543 * @last_command: tells the AdminQ that this is the last command
 544 *
 545 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 546 **/
 547static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
 548                                     u32 offset, u16 words, void *data,
 549                                     bool last_command)
 550{
 551        i40e_status ret_code = I40E_ERR_NVM;
 552        struct i40e_asq_cmd_details cmd_details;
 553
 554        memset(&cmd_details, 0, sizeof(cmd_details));
 555        cmd_details.wb_desc = &hw->nvm_wb_desc;
 556
 557        /* Here we are checking the SR limit only for the flat memory model.
 558         * We cannot do it for the module-based model, as we did not acquire
 559         * the NVM resource yet (we cannot get the module pointer value).
 560         * Firmware will check the module-based model.
 561         */
 562        if ((offset + words) > hw->nvm.sr_size)
 563                i40e_debug(hw, I40E_DEBUG_NVM,
 564                           "NVM write error: offset %d beyond Shadow RAM limit %d\n",
 565                           (offset + words), hw->nvm.sr_size);
 566        else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
 567                /* We can write only up to 4KB (one sector), in one AQ write */
 568                i40e_debug(hw, I40E_DEBUG_NVM,
 569                           "NVM write fail error: tried to write %d words, limit is %d.\n",
 570                           words, I40E_SR_SECTOR_SIZE_IN_WORDS);
 571        else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
 572                 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
 573                /* A single write cannot spread over two sectors */
 574                i40e_debug(hw, I40E_DEBUG_NVM,
 575                           "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
 576                           offset, words);
 577        else
 578                ret_code = i40e_aq_update_nvm(hw, module_pointer,
 579                                              2 * offset,  /*bytes*/
 580                                              2 * words,   /*bytes*/
 581                                              data, last_command, 0,
 582                                              &cmd_details);
 583
 584        return ret_code;
 585}
 586
 587/**
 588 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 589 * @hw: pointer to hardware structure
 590 * @checksum: pointer to the checksum
 591 *
 592 * This function calculates the SW checksum that covers the whole 64kB shadow
 593 * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
 594 * of the VPD area are customer specific and unknown, so this function skips
 595 * the maximum possible VPD size (1kB).
 596 **/
 597static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 598                                                    u16 *checksum)
 599{
 600        i40e_status ret_code;
 601        struct i40e_virt_mem vmem;
 602        u16 pcie_alt_module = 0;
 603        u16 checksum_local = 0;
 604        u16 vpd_module = 0;
 605        u16 *data;
 606        u16 i = 0;
 607
 608        ret_code = i40e_allocate_virt_mem(hw, &vmem,
 609                                    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
 610        if (ret_code)
 611                goto i40e_calc_nvm_checksum_exit;
 612        data = (u16 *)vmem.va;
 613
 614        /* read pointer to VPD area */
 615        ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
 616        if (ret_code) {
 617                ret_code = I40E_ERR_NVM_CHECKSUM;
 618                goto i40e_calc_nvm_checksum_exit;
 619        }
 620
 621        /* read pointer to PCIe Alt Auto-load module */
 622        ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
 623                                        &pcie_alt_module);
 624        if (ret_code) {
 625                ret_code = I40E_ERR_NVM_CHECKSUM;
 626                goto i40e_calc_nvm_checksum_exit;
 627        }
 628
 629        /* Calculate SW checksum that covers the whole 64kB shadow RAM
 630         * except the VPD and PCIe ALT Auto-load modules
 631         */
 632        for (i = 0; i < hw->nvm.sr_size; i++) {
 633                /* Read SR page */
 634                if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
 635                        u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
 636
 637                        ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
 638                        if (ret_code) {
 639                                ret_code = I40E_ERR_NVM_CHECKSUM;
 640                                goto i40e_calc_nvm_checksum_exit;
 641                        }
 642                }
 643
 644                /* Skip Checksum word */
 645                if (i == I40E_SR_SW_CHECKSUM_WORD)
 646                        continue;
 647                /* Skip VPD module (convert byte size to word count) */
 648                if ((i >= (u32)vpd_module) &&
 649                    (i < ((u32)vpd_module +
 650                     (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
 651                        continue;
 652                }
 653                /* Skip PCIe ALT module (convert byte size to word count) */
 654                if ((i >= (u32)pcie_alt_module) &&
 655                    (i < ((u32)pcie_alt_module +
 656                     (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
 657                        continue;
 658                }
 659
 660                checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
 661        }
 662
 663        *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
 664
 665i40e_calc_nvm_checksum_exit:
 666        i40e_free_virt_mem(hw, &vmem);
 667        return ret_code;
 668}
 669
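    /*
     * Illustrative note (not part of the driver): the stored checksum is
     * defined so that, over the covered Shadow RAM words,
     *
     *        checksum = I40E_SR_SW_CHECKSUM_BASE - sum(covered words)  (mod 2^16)
     *
     * so an image is consistent exactly when the recomputed value matches
     * the word stored at I40E_SR_SW_CHECKSUM_WORD, which is what
     * i40e_validate_nvm_checksum() below checks.
     */
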
 670/**
 671 * i40e_update_nvm_checksum - Updates the NVM checksum
 672 * @hw: pointer to hardware structure
 673 *
 674 * NVM ownership must be acquired before calling this function and released
 675 * on ARQ completion event reception by caller.
 676 * This function will commit SR to NVM.
 677 **/
 678i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
 679{
 680        i40e_status ret_code;
 681        u16 checksum;
 682        __le16 le_sum;
 683
 684        ret_code = i40e_calc_nvm_checksum(hw, &checksum);
 685        le_sum = cpu_to_le16(checksum);
 686        if (!ret_code)
 687                ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
 688                                             1, &le_sum, true);
 689
 690        return ret_code;
 691}
 692
 693/**
 694 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 695 * @hw: pointer to hardware structure
 696 * @checksum: calculated checksum
 697 *
 698 * Performs checksum calculation and validates the NVM SW checksum. If the
 699 * caller does not need the checksum, @checksum can be NULL.
 700 **/
 701i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
 702                                                 u16 *checksum)
 703{
 704        i40e_status ret_code = 0;
 705        u16 checksum_sr = 0;
 706        u16 checksum_local = 0;
 707
 708        /* We must acquire the NVM lock in order to correctly synchronize the
 709         * NVM accesses across multiple PFs. Without doing so it is possible
 710         * for one of the PFs to read invalid data potentially indicating that
 711         * the checksum is invalid.
 712         */
 713        ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 714        if (ret_code)
 715                return ret_code;
 716        ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
 717        __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
 718        i40e_release_nvm(hw);
 719        if (ret_code)
 720                return ret_code;
 721
 722        /* Verify read checksum from EEPROM is the same as
 723         * calculated checksum
 724         */
 725        if (checksum_local != checksum_sr)
 726                ret_code = I40E_ERR_NVM_CHECKSUM;
 727
 728        /* If the user cares, return the calculated checksum */
 729        if (checksum)
 730                *checksum = checksum_local;
 731
 732        return ret_code;
 733}
 734
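    /*
     * Illustrative sketch (not part of the driver): a caller that only cares
     * whether the image is consistent can pass NULL for the checksum
     * argument.
     *
     *        if (i40e_validate_nvm_checksum(hw, NULL) == I40E_ERR_NVM_CHECKSUM)
     *                i40e_debug(hw, I40E_DEBUG_NVM, "NVM checksum mismatch\n");
     */
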
 735static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
 736                                          struct i40e_nvm_access *cmd,
 737                                          u8 *bytes, int *perrno);
 738static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
 739                                             struct i40e_nvm_access *cmd,
 740                                             u8 *bytes, int *perrno);
 741static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
 742                                             struct i40e_nvm_access *cmd,
 743                                             u8 *bytes, int *perrno);
 744static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
 745                                                struct i40e_nvm_access *cmd,
 746                                                int *perrno);
 747static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
 748                                         struct i40e_nvm_access *cmd,
 749                                         int *perrno);
 750static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
 751                                         struct i40e_nvm_access *cmd,
 752                                         u8 *bytes, int *perrno);
 753static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
 754                                        struct i40e_nvm_access *cmd,
 755                                        u8 *bytes, int *perrno);
 756static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
 757                                       struct i40e_nvm_access *cmd,
 758                                       u8 *bytes, int *perrno);
 759static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
 760                                             struct i40e_nvm_access *cmd,
 761                                             u8 *bytes, int *perrno);
 762static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
 763                                            struct i40e_nvm_access *cmd,
 764                                            u8 *bytes, int *perrno);
 765static inline u8 i40e_nvmupd_get_module(u32 val)
 766{
 767        return (u8)(val & I40E_NVM_MOD_PNT_MASK);
 768}
 769static inline u8 i40e_nvmupd_get_transaction(u32 val)
 770{
 771        return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
 772}
 773
 774static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
 775{
 776        return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
 777                    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
 778}
 779
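    /*
     * Illustrative note (not part of the driver): the helpers above unpack
     * the config word supplied with an NVM update request.  Assuming the
     * mask layout from i40e_type.h, the low bits select the module pointer,
     * the next field selects the transaction type (CON/SNT/LCB/SA/ERA/CSUM/
     * EXEC/AQE), and the preservation flags field selects how much of the
     * NVM image is preserved across an update.
     */
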
 780static const char * const i40e_nvm_update_state_str[] = {
 781        "I40E_NVMUPD_INVALID",
 782        "I40E_NVMUPD_READ_CON",
 783        "I40E_NVMUPD_READ_SNT",
 784        "I40E_NVMUPD_READ_LCB",
 785        "I40E_NVMUPD_READ_SA",
 786        "I40E_NVMUPD_WRITE_ERA",
 787        "I40E_NVMUPD_WRITE_CON",
 788        "I40E_NVMUPD_WRITE_SNT",
 789        "I40E_NVMUPD_WRITE_LCB",
 790        "I40E_NVMUPD_WRITE_SA",
 791        "I40E_NVMUPD_CSUM_CON",
 792        "I40E_NVMUPD_CSUM_SA",
 793        "I40E_NVMUPD_CSUM_LCB",
 794        "I40E_NVMUPD_STATUS",
 795        "I40E_NVMUPD_EXEC_AQ",
 796        "I40E_NVMUPD_GET_AQ_RESULT",
 797        "I40E_NVMUPD_GET_AQ_EVENT",
 798};
 799
 800/**
 801 * i40e_nvmupd_command - Process an NVM update command
 802 * @hw: pointer to hardware structure
 803 * @cmd: pointer to nvm update command
 804 * @bytes: pointer to the data buffer
 805 * @perrno: pointer to return error code
 806 *
 807 * Dispatches command depending on what update state is current
 808 **/
 809i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
 810                                struct i40e_nvm_access *cmd,
 811                                u8 *bytes, int *perrno)
 812{
 813        i40e_status status;
 814        enum i40e_nvmupd_cmd upd_cmd;
 815
 816        /* assume success */
 817        *perrno = 0;
 818
 819        /* early check for status command and debug msgs */
 820        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 821
 822        i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
 823                   i40e_nvm_update_state_str[upd_cmd],
 824                   hw->nvmupd_state,
 825                   hw->nvm_release_on_done, hw->nvm_wait_opcode,
 826                   cmd->command, cmd->config, cmd->offset, cmd->data_size);
 827
 828        if (upd_cmd == I40E_NVMUPD_INVALID) {
 829                *perrno = -EFAULT;
 830                i40e_debug(hw, I40E_DEBUG_NVM,
 831                           "i40e_nvmupd_validate_command returns %d errno %d\n",
 832                           upd_cmd, *perrno);
 833        }
 834
 835        /* a status request returns immediately rather than
 836         * going into the state machine
 837         */
 838        if (upd_cmd == I40E_NVMUPD_STATUS) {
 839                if (!cmd->data_size) {
 840                        *perrno = -EFAULT;
 841                        return I40E_ERR_BUF_TOO_SHORT;
 842                }
 843
 844                bytes[0] = hw->nvmupd_state;
 845
 846                if (cmd->data_size >= 4) {
 847                        bytes[1] = 0;
 848                        *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
 849                }
 850
 851                /* Clear error status on read */
 852                if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
 853                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 854
 855                return 0;
 856        }
 857
 858        /* Clear the error status even if it was not read, and log it */
 859        if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
 860                i40e_debug(hw, I40E_DEBUG_NVM,
 861                           "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
 862                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 863        }
 864
 865        /* Acquire lock to prevent race condition where adminq_task
 866         * can execute after i40e_nvmupd_nvm_read/write but before state
 867         * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
 868         *
 869         * During NVMUpdate, the lock is typically held for ~5ms for most
 870         * commands. However, it is held for ~60ms for the
 871         * NVMUPD_CSUM_LCB command.
 872         */
 873        mutex_lock(&hw->aq.arq_mutex);
 874        switch (hw->nvmupd_state) {
 875        case I40E_NVMUPD_STATE_INIT:
 876                status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
 877                break;
 878
 879        case I40E_NVMUPD_STATE_READING:
 880                status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
 881                break;
 882
 883        case I40E_NVMUPD_STATE_WRITING:
 884                status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
 885                break;
 886
 887        case I40E_NVMUPD_STATE_INIT_WAIT:
 888        case I40E_NVMUPD_STATE_WRITE_WAIT:
 889                /* if we need to stop waiting for an event, clear
 890                 * the wait info and return before doing anything else
 891                 */
 892                if (cmd->offset == 0xffff) {
 893                        i40e_nvmupd_clear_wait_state(hw);
 894                        status = 0;
 895                        break;
 896                }
 897
 898                status = I40E_ERR_NOT_READY;
 899                *perrno = -EBUSY;
 900                break;
 901
 902        default:
 903                /* invalid state, should never happen */
 904                i40e_debug(hw, I40E_DEBUG_NVM,
 905                           "NVMUPD: no such state %d\n", hw->nvmupd_state);
 906                status = I40E_NOT_SUPPORTED;
 907                *perrno = -ESRCH;
 908                break;
 909        }
 910
 911        mutex_unlock(&hw->aq.arq_mutex);
 912        return status;
 913}
 914
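    /*
     * Illustrative note (not part of the driver): the state machine
     * dispatched above moves between states roughly as follows.  From INIT,
     * READ_SNT enters READING, WRITE_SNT enters WRITE_WAIT, and
     * WRITE_SA/WRITE_ERA/CSUM_SA enter INIT_WAIT; READ_LCB leaves READING
     * for INIT; the wait states are left via i40e_nvmupd_clear_wait_state()
     * when the awaited AdminQ completion arrives (or when userspace aborts
     * with offset 0xffff).
     */
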
 915/**
 916 * i40e_nvmupd_state_init - Handle NVM update state Init
 917 * @hw: pointer to hardware structure
 918 * @cmd: pointer to nvm update command buffer
 919 * @bytes: pointer to the data buffer
 920 * @perrno: pointer to return error code
 921 *
 922 * Process legitimate commands of the Init state and conditionally set next
 923 * state. Reject all other commands.
 924 **/
 925static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
 926                                          struct i40e_nvm_access *cmd,
 927                                          u8 *bytes, int *perrno)
 928{
 929        i40e_status status = 0;
 930        enum i40e_nvmupd_cmd upd_cmd;
 931
 932        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
 933
 934        switch (upd_cmd) {
 935        case I40E_NVMUPD_READ_SA:
 936                status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 937                if (status) {
 938                        *perrno = i40e_aq_rc_to_posix(status,
 939                                                     hw->aq.asq_last_status);
 940                } else {
 941                        status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
 942                        i40e_release_nvm(hw);
 943                }
 944                break;
 945
 946        case I40E_NVMUPD_READ_SNT:
 947                status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 948                if (status) {
 949                        *perrno = i40e_aq_rc_to_posix(status,
 950                                                     hw->aq.asq_last_status);
 951                } else {
 952                        status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
 953                        if (status)
 954                                i40e_release_nvm(hw);
 955                        else
 956                                hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
 957                }
 958                break;
 959
 960        case I40E_NVMUPD_WRITE_ERA:
 961                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 962                if (status) {
 963                        *perrno = i40e_aq_rc_to_posix(status,
 964                                                     hw->aq.asq_last_status);
 965                } else {
 966                        status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
 967                        if (status) {
 968                                i40e_release_nvm(hw);
 969                        } else {
 970                                hw->nvm_release_on_done = true;
 971                                hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
 972                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 973                        }
 974                }
 975                break;
 976
 977        case I40E_NVMUPD_WRITE_SA:
 978                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 979                if (status) {
 980                        *perrno = i40e_aq_rc_to_posix(status,
 981                                                     hw->aq.asq_last_status);
 982                } else {
 983                        status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
 984                        if (status) {
 985                                i40e_release_nvm(hw);
 986                        } else {
 987                                hw->nvm_release_on_done = true;
 988                                hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
 989                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
 990                        }
 991                }
 992                break;
 993
 994        case I40E_NVMUPD_WRITE_SNT:
 995                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
 996                if (status) {
 997                        *perrno = i40e_aq_rc_to_posix(status,
 998                                                     hw->aq.asq_last_status);
 999                } else {
1000                        status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1001                        if (status) {
1002                                i40e_release_nvm(hw);
1003                        } else {
1004                                hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1005                                hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1006                        }
1007                }
1008                break;
1009
1010        case I40E_NVMUPD_CSUM_SA:
1011                status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1012                if (status) {
1013                        *perrno = i40e_aq_rc_to_posix(status,
1014                                                     hw->aq.asq_last_status);
1015                } else {
1016                        status = i40e_update_nvm_checksum(hw);
1017                        if (status) {
1018                                *perrno = hw->aq.asq_last_status ?
1019                                   i40e_aq_rc_to_posix(status,
1020                                                       hw->aq.asq_last_status) :
1021                                   -EIO;
1022                                i40e_release_nvm(hw);
1023                        } else {
1024                                hw->nvm_release_on_done = true;
1025                                hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1026                                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1027                        }
1028                }
1029                break;
1030
1031        case I40E_NVMUPD_EXEC_AQ:
1032                status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1033                break;
1034
1035        case I40E_NVMUPD_GET_AQ_RESULT:
1036                status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1037                break;
1038
1039        case I40E_NVMUPD_GET_AQ_EVENT:
1040                status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
1041                break;
1042
1043        default:
1044                i40e_debug(hw, I40E_DEBUG_NVM,
1045                           "NVMUPD: bad cmd %s in init state\n",
1046                           i40e_nvm_update_state_str[upd_cmd]);
1047                status = I40E_ERR_NVM;
1048                *perrno = -ESRCH;
1049                break;
1050        }
1051        return status;
1052}
1053
1054/**
1055 * i40e_nvmupd_state_reading - Handle NVM update state Reading
1056 * @hw: pointer to hardware structure
1057 * @cmd: pointer to nvm update command buffer
1058 * @bytes: pointer to the data buffer
1059 * @perrno: pointer to return error code
1060 *
1061 * NVM ownership is already held.  Process legitimate commands and set any
1062 * change in state; reject all other commands.
1063 **/
1064static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
1065                                             struct i40e_nvm_access *cmd,
1066                                             u8 *bytes, int *perrno)
1067{
1068        i40e_status status = 0;
1069        enum i40e_nvmupd_cmd upd_cmd;
1070
1071        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1072
1073        switch (upd_cmd) {
1074        case I40E_NVMUPD_READ_SA:
1075        case I40E_NVMUPD_READ_CON:
1076                status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1077                break;
1078
1079        case I40E_NVMUPD_READ_LCB:
1080                status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1081                i40e_release_nvm(hw);
1082                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1083                break;
1084
1085        default:
1086                i40e_debug(hw, I40E_DEBUG_NVM,
1087                           "NVMUPD: bad cmd %s in reading state.\n",
1088                           i40e_nvm_update_state_str[upd_cmd]);
1089                status = I40E_NOT_SUPPORTED;
1090                *perrno = -ESRCH;
1091                break;
1092        }
1093        return status;
1094}
1095
1096/**
1097 * i40e_nvmupd_state_writing - Handle NVM update state Writing
1098 * @hw: pointer to hardware structure
1099 * @cmd: pointer to nvm update command buffer
1100 * @bytes: pointer to the data buffer
1101 * @perrno: pointer to return error code
1102 *
1103 * NVM ownership is already held.  Process legitimate commands and set any
1104 * change in state; reject all other commands
1105 **/
1106static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
1107                                             struct i40e_nvm_access *cmd,
1108                                             u8 *bytes, int *perrno)
1109{
1110        i40e_status status = 0;
1111        enum i40e_nvmupd_cmd upd_cmd;
1112        bool retry_attempt = false;
1113
1114        upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1115
1116retry:
1117        switch (upd_cmd) {
1118        case I40E_NVMUPD_WRITE_CON:
1119                status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1120                if (!status) {
1121                        hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1122                        hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1123                }
1124                break;
1125
1126        case I40E_NVMUPD_WRITE_LCB:
1127                status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1128                if (status) {
1129                        *perrno = hw->aq.asq_last_status ?
1130                                   i40e_aq_rc_to_posix(status,
1131                                                       hw->aq.asq_last_status) :
1132                                   -EIO;
1133                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1134                } else {
1135                        hw->nvm_release_on_done = true;
1136                        hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1137                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1138                }
1139                break;
1140
1141        case I40E_NVMUPD_CSUM_CON:
1142                /* Assumes the caller has acquired the nvm */
1143                status = i40e_update_nvm_checksum(hw);
1144                if (status) {
1145                        *perrno = hw->aq.asq_last_status ?
1146                                   i40e_aq_rc_to_posix(status,
1147                                                       hw->aq.asq_last_status) :
1148                                   -EIO;
1149                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1150                } else {
1151                        hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1152                        hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1153                }
1154                break;
1155
1156        case I40E_NVMUPD_CSUM_LCB:
1157                /* Assumes the caller has acquired the nvm */
1158                status = i40e_update_nvm_checksum(hw);
1159                if (status) {
1160                        *perrno = hw->aq.asq_last_status ?
1161                                   i40e_aq_rc_to_posix(status,
1162                                                       hw->aq.asq_last_status) :
1163                                   -EIO;
1164                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1165                } else {
1166                        hw->nvm_release_on_done = true;
1167                        hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1168                        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1169                }
1170                break;
1171
1172        default:
1173                i40e_debug(hw, I40E_DEBUG_NVM,
1174                           "NVMUPD: bad cmd %s in writing state.\n",
1175                           i40e_nvm_update_state_str[upd_cmd]);
1176                status = I40E_NOT_SUPPORTED;
1177                *perrno = -ESRCH;
1178                break;
1179        }
1180
1181        /* In some circumstances, a multi-write transaction takes longer
1182         * than the default 3 minute timeout on the write semaphore.  If
1183         * the write failed with an EBUSY status, this is likely the problem,
1184         * so here we try to reacquire the semaphore then retry the write.
1185         * We only do one retry, then give up.
1186         */
1187        if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1188            !retry_attempt) {
1189                i40e_status old_status = status;
1190                u32 old_asq_status = hw->aq.asq_last_status;
1191                u32 gtime;
1192
1193                gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1194                if (gtime >= hw->nvm.hw_semaphore_timeout) {
1195                        i40e_debug(hw, I40E_DEBUG_ALL,
1196                                   "NVMUPD: write semaphore expired (%u >= %llu), retrying\n",
1197                                   gtime, hw->nvm.hw_semaphore_timeout);
1198                        i40e_release_nvm(hw);
1199                        status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1200                        if (status) {
1201                                i40e_debug(hw, I40E_DEBUG_ALL,
1202                                           "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1203                                           hw->aq.asq_last_status);
1204                                status = old_status;
1205                                hw->aq.asq_last_status = old_asq_status;
1206                        } else {
1207                                retry_attempt = true;
1208                                goto retry;
1209                        }
1210                }
1211        }
1212
1213        return status;
1214}
1215
1216/**
1217 * i40e_nvmupd_clear_wait_state - clear wait state on hw
1218 * @hw: pointer to the hardware structure
1219 **/
1220void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1221{
1222        i40e_debug(hw, I40E_DEBUG_NVM,
1223                   "NVMUPD: clearing wait on opcode 0x%04x\n",
1224                   hw->nvm_wait_opcode);
1225
1226        if (hw->nvm_release_on_done) {
1227                i40e_release_nvm(hw);
1228                hw->nvm_release_on_done = false;
1229        }
1230        hw->nvm_wait_opcode = 0;
1231
1232        if (hw->aq.arq_last_status) {
1233                hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1234                return;
1235        }
1236
1237        switch (hw->nvmupd_state) {
1238        case I40E_NVMUPD_STATE_INIT_WAIT:
1239                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1240                break;
1241
1242        case I40E_NVMUPD_STATE_WRITE_WAIT:
1243                hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1244                break;
1245
1246        default:
1247                break;
1248        }
1249}
1250
1251/**
1252 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1253 * @hw: pointer to the hardware structure
1254 * @opcode: the event that just happened
1255 * @desc: AdminQ descriptor
1256 **/
1257void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1258                                  struct i40e_aq_desc *desc)
1259{
1260        u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1261
1262        if (opcode == hw->nvm_wait_opcode) {
1263                memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
1264                i40e_nvmupd_clear_wait_state(hw);
1265        }
1266}
1267
1268/**
1269 * i40e_nvmupd_validate_command - Validate given command
1270 * @hw: pointer to hardware structure
1271 * @cmd: pointer to nvm update command buffer
1272 * @perrno: pointer to return error code
1273 *
1274 * Return one of the valid command types or I40E_NVMUPD_INVALID
1275 **/
1276static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1277                                                 struct i40e_nvm_access *cmd,
1278                                                 int *perrno)
1279{
1280        enum i40e_nvmupd_cmd upd_cmd;
1281        u8 module, transaction;
1282
1283        /* anything that doesn't match a recognized case is an error */
1284        upd_cmd = I40E_NVMUPD_INVALID;
1285
1286        transaction = i40e_nvmupd_get_transaction(cmd->config);
1287        module = i40e_nvmupd_get_module(cmd->config);
1288
1289        /* limits on data size */
1290        if ((cmd->data_size < 1) ||
1291            (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1292                i40e_debug(hw, I40E_DEBUG_NVM,
1293                           "i40e_nvmupd_validate_command data_size %d\n",
1294                           cmd->data_size);
1295                *perrno = -EFAULT;
1296                return I40E_NVMUPD_INVALID;
1297        }
1298
1299        switch (cmd->command) {
1300        case I40E_NVM_READ:
1301                switch (transaction) {
1302                case I40E_NVM_CON:
1303                        upd_cmd = I40E_NVMUPD_READ_CON;
1304                        break;
1305                case I40E_NVM_SNT:
1306                        upd_cmd = I40E_NVMUPD_READ_SNT;
1307                        break;
1308                case I40E_NVM_LCB:
1309                        upd_cmd = I40E_NVMUPD_READ_LCB;
1310                        break;
1311                case I40E_NVM_SA:
1312                        upd_cmd = I40E_NVMUPD_READ_SA;
1313                        break;
1314                case I40E_NVM_EXEC:
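                            /* EXEC reads use pseudo-module numbers: 0xf asks
                             * for the update state machine status, 0 fetches
                             * the result of the last AdminQ command
                             */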
1315                        if (module == 0xf)
1316                                upd_cmd = I40E_NVMUPD_STATUS;
1317                        else if (module == 0)
1318                                upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1319                        break;
1320                case I40E_NVM_AQE:
1321                        upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1322                        break;
1323                }
1324                break;
1325
1326        case I40E_NVM_WRITE:
1327                switch (transaction) {
1328                case I40E_NVM_CON:
1329                        upd_cmd = I40E_NVMUPD_WRITE_CON;
1330                        break;
1331                case I40E_NVM_SNT:
1332                        upd_cmd = I40E_NVMUPD_WRITE_SNT;
1333                        break;
1334                case I40E_NVM_LCB:
1335                        upd_cmd = I40E_NVMUPD_WRITE_LCB;
1336                        break;
1337                case I40E_NVM_SA:
1338                        upd_cmd = I40E_NVMUPD_WRITE_SA;
1339                        break;
1340                case I40E_NVM_ERA:
1341                        upd_cmd = I40E_NVMUPD_WRITE_ERA;
1342                        break;
1343                case I40E_NVM_CSUM:
1344                        upd_cmd = I40E_NVMUPD_CSUM_CON;
1345                        break;
1346                case (I40E_NVM_CSUM | I40E_NVM_SA):
1347                        upd_cmd = I40E_NVMUPD_CSUM_SA;
1348                        break;
1349                case (I40E_NVM_CSUM | I40E_NVM_LCB):
1350                        upd_cmd = I40E_NVMUPD_CSUM_LCB;
1351                        break;
1352                case I40E_NVM_EXEC:
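                            /* only module 0 may carry an arbitrary AdminQ
                             * command to execute
                             */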
1353                        if (module == 0)
1354                                upd_cmd = I40E_NVMUPD_EXEC_AQ;
1355                        break;
1356                }
1357                break;
1358        }
1359
1360        return upd_cmd;
1361}
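
    /* Example decode (a sketch, not driver code): the transaction bits in
     * cmd->config say where a request falls in a sequence (as their use in
     * the state machine earlier in this file suggests, SNT starts one, CON
     * continues it, LCB ends it and SA is a complete single access), while
     * the module field picks the target.  A whole-module read in one shot
     * would therefore resolve as:
     *
     *        transaction = i40e_nvmupd_get_transaction(cmd->config);  (I40E_NVM_SA)
     *        module      = i40e_nvmupd_get_module(cmd->config);       (target module)
     *        I40E_NVM_READ with I40E_NVM_SA  ->  upd_cmd = I40E_NVMUPD_READ_SA
     */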
1362
1363/**
1364 * i40e_nvmupd_exec_aq - Run an AQ command
1365 * @hw: pointer to hardware structure
1366 * @cmd: pointer to nvm update command buffer
1367 * @bytes: pointer to the data buffer
1368 * @perrno: pointer to return error code
1369 *
1370 * cmd carries the identifiers and sizes; the bytes buffer holds the
     * AdminQ descriptor followed by any command data
1371 **/
1372static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1373                                       struct i40e_nvm_access *cmd,
1374                                       u8 *bytes, int *perrno)
1375{
1376        struct i40e_asq_cmd_details cmd_details;
1377        i40e_status status;
1378        struct i40e_aq_desc *aq_desc;
1379        u32 buff_size = 0;
1380        u8 *buff = NULL;
1381        u32 aq_desc_len;
1382        u32 aq_data_len;
1383
1384        i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
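            /* the offset field carries the opcode of the AdminQ completion to
             * wait for once the command is sent (see the end of this
             * function); the 0xffff sentinel means there is nothing to run
             */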
1385        if (cmd->offset == 0xffff)
1386                return 0;
1387
1388        memset(&cmd_details, 0, sizeof(cmd_details));
1389        cmd_details.wb_desc = &hw->nvm_wb_desc;
1390
1391        aq_desc_len = sizeof(struct i40e_aq_desc);
1392        memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1393
1394        /* get the aq descriptor */
1395        if (cmd->data_size < aq_desc_len) {
1396                i40e_debug(hw, I40E_DEBUG_NVM,
1397                           "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1398                           cmd->data_size, aq_desc_len);
1399                *perrno = -EINVAL;
1400                return I40E_ERR_PARAM;
1401        }
1402        aq_desc = (struct i40e_aq_desc *)bytes;
1403
1404        /* if data buffer needed, make sure it's ready */
1405        aq_data_len = cmd->data_size - aq_desc_len;
1406        buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
1407        if (buff_size) {
1408                if (!hw->nvm_buff.va) {
1409                        status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1410                                                        hw->aq.asq_buf_size);
1411                        if (status)
1412                                i40e_debug(hw, I40E_DEBUG_NVM,
1413                                           "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1414                                           status);
1415                }
1416
1417                if (hw->nvm_buff.va) {
1418                        buff = hw->nvm_buff.va;
1419                        memcpy(buff, &bytes[aq_desc_len], aq_data_len);
1420                }
1421        }
1422
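            /* if a follow-up event is expected, clear the stored event
             * descriptor before the command goes out
             */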
1423        if (cmd->offset)
1424                memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
1425
1426        /* and away we go! */
1427        status = i40e_asq_send_command(hw, aq_desc, buff,
1428                                       buff_size, &cmd_details);
1429        if (status) {
1430                i40e_debug(hw, I40E_DEBUG_NVM,
1431                           "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1432                           i40e_stat_str(hw, status),
1433                           i40e_aq_str(hw, hw->aq.asq_last_status));
1434                *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1435                return status;
1436        }
1437
1438        /* should we wait for a followup event? */
1439        if (cmd->offset) {
1440                hw->nvm_wait_opcode = cmd->offset;
1441                hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1442        }
1443
1444        return status;
1445}
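
    /* For reference, the buffer layout this helper expects from its caller
     * (a sketch derived from the code above, not a documented ABI):
     *
     *        bytes:     [ struct i40e_aq_desc ][ optional command data ... ]
     *        data_size: sizeof(struct i40e_aq_desc) + length of the data part
     *        offset:    opcode of the AdminQ event to wait for, 0 for none,
     *                   or the 0xffff sentinel to skip execution entirely
     */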
1446
1447/**
1448 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1449 * @hw: pointer to hardware structure
1450 * @cmd: pointer to nvm update command buffer
1451 * @bytes: pointer to the data buffer
1452 * @perrno: pointer to return error code
1453 *
1454 * cmd structure contains identifiers and data buffer
1455 **/
1456static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1457                                             struct i40e_nvm_access *cmd,
1458                                             u8 *bytes, int *perrno)
1459{
1460        u32 aq_total_len;
1461        u32 aq_desc_len;
1462        int remainder;
1463        u8 *buff;
1464
1465        i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1466
1467        aq_desc_len = sizeof(struct i40e_aq_desc);
1468        aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
1469
1470        /* check offset range */
1471        if (cmd->offset > aq_total_len) {
1472                i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1473                           __func__, cmd->offset, aq_total_len);
1474                *perrno = -EINVAL;
1475                return I40E_ERR_PARAM;
1476        }
1477
1478        /* check copylength range */
1479        if (cmd->data_size > (aq_total_len - cmd->offset)) {
1480                int new_len = aq_total_len - cmd->offset;
1481
1482                i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1483                           __func__, cmd->data_size, new_len);
1484                cmd->data_size = new_len;
1485        }
1486
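            /* the requested window may start inside the writeback descriptor
             * and spill over into the data buffer, so copy the two parts in
             * turn
             */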
1487        remainder = cmd->data_size;
1488        if (cmd->offset < aq_desc_len) {
1489                u32 len = aq_desc_len - cmd->offset;
1490
1491                len = min(len, cmd->data_size);
1492                i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1493                           __func__, cmd->offset, cmd->offset + len);
1494
1495                buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1496                memcpy(bytes, buff, len);
1497
1498                bytes += len;
1499                remainder -= len;
1500                buff = hw->nvm_buff.va;
1501        } else {
1502                buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1503        }
1504
1505        if (remainder > 0) {
1506                int start_byte = buff - (u8 *)hw->nvm_buff.va;
1507
1508                i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1509                           __func__, start_byte, start_byte + remainder);
1510                memcpy(bytes, buff, remainder);
1511        }
1512
1513        return 0;
1514}
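
    /* One way a caller can retrieve a result with the helper above (a
     * sketch, not necessarily the exact tool protocol): read the writeback
     * descriptor first to learn how much data followed, then read the data:
     *
     *        1st pass: offset = 0, data_size = sizeof(struct i40e_aq_desc)
     *        2nd pass: offset = sizeof(struct i40e_aq_desc),
     *                  data_size = datalen reported by the first pass
     */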
1515
1516/**
1517 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from the previous exec_aq
1518 * @hw: pointer to hardware structure
1519 * @cmd: pointer to nvm update command buffer
1520 * @bytes: pointer to the data buffer
1521 * @perrno: pointer to return error code
1522 *
1523 * cmd structure contains identifiers and data buffer
1524 **/
1525static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1526                                            struct i40e_nvm_access *cmd,
1527                                            u8 *bytes, int *perrno)
1528{
1529        u32 aq_total_len;
1530        u32 aq_desc_len;
1531
1532        i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1533
1534        aq_desc_len = sizeof(struct i40e_aq_desc);
1535        aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
1536
1537        /* check copylength range */
1538        if (cmd->data_size > aq_total_len) {
1539                i40e_debug(hw, I40E_DEBUG_NVM,
1540                           "%s: copy length %d too big, trimming to %d\n",
1541                           __func__, cmd->data_size, aq_total_len);
1542                cmd->data_size = aq_total_len;
1543        }
1544
1545        memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
1546
1547        return 0;
1548}
1549
1550/**
1551 * i40e_nvmupd_nvm_read - Read NVM
1552 * @hw: pointer to hardware structure
1553 * @cmd: pointer to nvm update command buffer
1554 * @bytes: pointer to the data buffer
1555 * @perrno: pointer to return error code
1556 *
1557 * cmd structure contains identifiers and data buffer
1558 **/
1559static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1560                                        struct i40e_nvm_access *cmd,
1561                                        u8 *bytes, int *perrno)
1562{
1563        struct i40e_asq_cmd_details cmd_details;
1564        i40e_status status;
1565        u8 module, transaction;
1566        bool last;
1567
1568        transaction = i40e_nvmupd_get_transaction(cmd->config);
1569        module = i40e_nvmupd_get_module(cmd->config);
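            /* SA and LCB both mark this as the final read of the sequence */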
1570        last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1571
1572        memset(&cmd_details, 0, sizeof(cmd_details));
1573        cmd_details.wb_desc = &hw->nvm_wb_desc;
1574
1575        status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1576                                  bytes, last, &cmd_details);
1577        if (status) {
1578                i40e_debug(hw, I40E_DEBUG_NVM,
1579                           "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
1580                           module, cmd->offset, cmd->data_size);
1581                i40e_debug(hw, I40E_DEBUG_NVM,
1582                           "i40e_nvmupd_nvm_read status %d aq %d\n",
1583                           status, hw->aq.asq_last_status);
1584                *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1585        }
1586
1587        return status;
1588}
1589
1590/**
1591 * i40e_nvmupd_nvm_erase - Erase an NVM module
1592 * @hw: pointer to hardware structure
1593 * @cmd: pointer to nvm update command buffer
1594 * @perrno: pointer to return error code
1595 *
1596 * module, offset and data_size are in the cmd structure; erase carries no data
1597 **/
1598static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1599                                         struct i40e_nvm_access *cmd,
1600                                         int *perrno)
1601{
1602        i40e_status status = 0;
1603        struct i40e_asq_cmd_details cmd_details;
1604        u8 module, transaction;
1605        bool last;
1606
1607        transaction = i40e_nvmupd_get_transaction(cmd->config);
1608        module = i40e_nvmupd_get_module(cmd->config);
1609        last = (transaction & I40E_NVM_LCB);
1610
1611        memset(&cmd_details, 0, sizeof(cmd_details));
1612        cmd_details.wb_desc = &hw->nvm_wb_desc;
1613
1614        status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1615                                   last, &cmd_details);
1616        if (status) {
1617                i40e_debug(hw, I40E_DEBUG_NVM,
1618                           "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
1619                           module, cmd->offset, cmd->data_size);
1620                i40e_debug(hw, I40E_DEBUG_NVM,
1621                           "i40e_nvmupd_nvm_erase status %d aq %d\n",
1622                           status, hw->aq.asq_last_status);
1623                *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1624        }
1625
1626        return status;
1627}
1628
1629/**
1630 * i40e_nvmupd_nvm_write - Write NVM
1631 * @hw: pointer to hardware structure
1632 * @cmd: pointer to nvm update command buffer
1633 * @bytes: pointer to the data buffer
1634 * @perrno: pointer to return error code
1635 *
1636 * module, offset and data_size are in the cmd structure; the data is in bytes
1637 **/
1638static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1639                                         struct i40e_nvm_access *cmd,
1640                                         u8 *bytes, int *perrno)
1641{
1642        i40e_status status = 0;
1643        struct i40e_asq_cmd_details cmd_details;
1644        u8 module, transaction;
1645        u8 preservation_flags;
1646        bool last;
1647
1648        transaction = i40e_nvmupd_get_transaction(cmd->config);
1649        module = i40e_nvmupd_get_module(cmd->config);
1650        last = (transaction & I40E_NVM_LCB);
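            /* the preservation flags tell the firmware how much of the
             * existing configuration to carry across the update
             */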
1651        preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1652
1653        memset(&cmd_details, 0, sizeof(cmd_details));
1654        cmd_details.wb_desc = &hw->nvm_wb_desc;
1655
1656        status = i40e_aq_update_nvm(hw, module, cmd->offset,
1657                                    (u16)cmd->data_size, bytes, last,
1658                                    preservation_flags, &cmd_details);
1659        if (status) {
1660                i40e_debug(hw, I40E_DEBUG_NVM,
1661                           "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1662                           module, cmd->offset, cmd->data_size);
1663                i40e_debug(hw, I40E_DEBUG_NVM,
1664                           "i40e_nvmupd_nvm_write status %d aq %d\n",
1665                           status, hw->aq.asq_last_status);
1666                *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1667        }
1668
1669        return status;
1670}
1671