linux/drivers/net/ethernet/intel/ice/ice_nvm.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

/**
 * ice_aq_read_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be read (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @read_shadow_ram: tells if this is a Shadow RAM read
 * @cd: pointer to command details structure or NULL
 *
 * Read the NVM using the admin queue commands (0x0701)
 */
static enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
                void *data, bool last_command, bool read_shadow_ram,
                struct ice_sq_cd *cd)
{
        struct ice_aq_desc desc;
        struct ice_aqc_nvm *cmd;

        cmd = &desc.params.nvm;

        if (offset > ICE_AQC_NVM_MAX_OFFSET)
                return ICE_ERR_PARAM;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);

        if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
                cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;

        /* If this is the last command in a series, set the proper flag. */
        if (last_command)
                cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
        cmd->module_typeid = cpu_to_le16(module_typeid);
        cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
        cmd->offset_high = (offset >> 16) & 0xFF;
        cmd->length = cpu_to_le16(length);

        return ice_aq_send_cmd(hw, &desc, data, length, cd);
}
  47
  48/**
  49 * ice_read_flat_nvm - Read portion of NVM by flat offset
  50 * @hw: pointer to the HW struct
  51 * @offset: offset from beginning of NVM
  52 * @length: (in) number of bytes to read; (out) number of bytes actually read
  53 * @data: buffer to return data in (sized to fit the specified length)
  54 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
  55 *
  56 * Reads a portion of the NVM, as a flat memory space. This function correctly
  57 * breaks read requests across Shadow RAM sectors and ensures that no single
  58 * read request exceeds the maximum 4Kb read for a single AdminQ command.
  59 *
  60 * Returns a status code on failure. Note that the data pointer may be
  61 * partially updated if some reads succeed before a failure.
  62 */
enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
                  bool read_shadow_ram)
{
        enum ice_status status;
        u32 inlen = *length;
        u32 bytes_read = 0;
        bool last_cmd;

        *length = 0;

        /* Verify the length of the read if this is for the Shadow RAM */
        if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
                ice_debug(hw, ICE_DBG_NVM,
                          "NVM error: requested offset is beyond Shadow RAM limit\n");
                return ICE_ERR_PARAM;
        }

        do {
                u32 read_size, sector_offset;

                /* ice_aq_read_nvm cannot read more than 4KB at a time.
                 * Additionally, a read from the Shadow RAM may not cross over
                 * a sector boundary. Conveniently, the sector size is also
                 * 4KB.
                 */
                sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
                read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
                                  inlen - bytes_read);

                last_cmd = !(bytes_read + read_size < inlen);

                status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
                                         offset, read_size,
                                         data + bytes_read, last_cmd,
                                         read_shadow_ram, NULL);
                if (status)
                        break;

                bytes_read += read_size;
                offset += read_size;
        } while (!last_cmd);

        *length = bytes_read;
        return status;
}
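
/* Illustrative usage sketch (not part of the upstream driver): read the first
 * bytes of the flash as a flat address space. The helper name and buffer size
 * are arbitrary examples; the caller is assumed to already hold the NVM
 * resource via ice_acquire_nvm().
 */
static inline enum ice_status
ice_example_read_flash_start(struct ice_hw *hw, u8 *buf, u32 buf_size)
{
        u32 len = buf_size;     /* in: bytes requested, out: bytes actually read */

        /* read from the flash itself (read_shadow_ram = false) at offset 0 */
        return ice_read_flat_nvm(hw, 0, &len, buf, false);
}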

/**
 * ice_aq_update_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be written (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @command_flags: command parameters
 * @cd: pointer to command details structure or NULL
 *
 * Update the NVM using the admin queue commands (0x0703)
 */
enum ice_status
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
                  u16 length, void *data, bool last_command, u8 command_flags,
                  struct ice_sq_cd *cd)
{
        struct ice_aq_desc desc;
        struct ice_aqc_nvm *cmd;

        cmd = &desc.params.nvm;

        /* The highest byte of the offset must be zero. */
        if (offset & 0xFF000000)
                return ICE_ERR_PARAM;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);

        cmd->cmd_flags |= command_flags;

        /* If this is the last command in a series, set the proper flag. */
        if (last_command)
                cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
        cmd->module_typeid = cpu_to_le16(module_typeid);
        cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
        cmd->offset_high = (offset >> 16) & 0xFF;
        cmd->length = cpu_to_le16(length);

        desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

        return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

/**
 * ice_aq_erase_nvm
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @cd: pointer to command details structure or NULL
 *
 * Erase the NVM sector using the admin queue commands (0x0702)
 */
enum ice_status
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
        struct ice_aq_desc desc;
        struct ice_aqc_nvm *cmd;

        cmd = &desc.params.nvm;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);

        cmd->module_typeid = cpu_to_le16(module_typeid);
        cmd->length = cpu_to_le16(ICE_AQC_NVM_ERASE_LEN);
        cmd->offset_low = 0;
        cmd->offset_high = 0;

        return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_read_sr_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16-bit word from the Shadow RAM using ice_read_flat_nvm.
 */
static enum ice_status
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
        u32 bytes = sizeof(u16);
        enum ice_status status;
        __le16 data_local;

        /* Note that ice_read_flat_nvm takes into account the 4KB AdminQ and
         * Shadow RAM sector restrictions necessary when reading from the NVM.
         */
        status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
                                   (u8 *)&data_local, true);
        if (status)
                return status;

        *data = le16_to_cpu(data_local);
        return 0;
}

/**
 * ice_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership.
 */
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
        if (hw->nvm.blank_nvm_mode)
                return 0;

        return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}

/**
 * ice_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM ownership.
 */
void ice_release_nvm(struct ice_hw *hw)
{
        if (hw->nvm.blank_nvm_mode)
                return;

        ice_release_res(hw, ICE_NVM_RES_ID);
}

/**
 * ice_read_sr_word - Reads a Shadow RAM word, acquiring the NVM if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16-bit word from the Shadow RAM using ice_read_sr_word_aq.
 */
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
        enum ice_status status;

        status = ice_acquire_nvm(hw, ICE_RES_READ);
        if (!status) {
                status = ice_read_sr_word_aq(hw, offset, data);
                ice_release_nvm(hw);
        }

        return status;
}
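
/* Illustrative usage sketch (not part of the upstream driver): rebuild the
 * 32-bit EETRACK ID from its two Shadow RAM words, mirroring what
 * ice_init_nvm() does further down. The helper name is an arbitrary example;
 * NVM ownership is acquired and released inside ice_read_sr_word().
 */
static inline enum ice_status
ice_example_read_eetrack(struct ice_hw *hw, u32 *eetrack)
{
        u16 eetrack_lo, eetrack_hi;
        enum ice_status status;

        status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
        if (status)
                return status;

        status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
        if (status)
                return status;

        *eetrack = ((u32)eetrack_hi << 16) | eetrack_lo;
        return 0;
}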

/**
 * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
 * @hw: pointer to hardware structure
 * @module_tlv: pointer to module TLV to return
 * @module_tlv_len: pointer to module TLV length to return
 * @module_type: module type requested
 *
 * Finds the requested sub module TLV type from the Preserved Field
 * Area (PFA) and returns the TLV pointer and length. The caller can
 * use these to read the variable length TLV value.
 */
enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
                       u16 module_type)
{
        enum ice_status status;
        u16 pfa_len, pfa_ptr;
        u16 next_tlv;

        status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n");
                return status;
        }
        status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
                return status;
        }
        /* Starting with first TLV after PFA length, iterate through the list
         * of TLVs to find the requested one.
         */
        next_tlv = pfa_ptr + 1;
        while (next_tlv < pfa_ptr + pfa_len) {
                u16 tlv_sub_module_type;
                u16 tlv_len;

                /* Read TLV type */
                status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
                if (status) {
                        ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
                        break;
                }
                /* Read TLV length */
                status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
                if (status) {
                        ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
                        break;
                }
                if (tlv_sub_module_type == module_type) {
                        if (tlv_len) {
                                *module_tlv = next_tlv;
                                *module_tlv_len = tlv_len;
                                return 0;
                        }
                        return ICE_ERR_INVAL_SIZE;
                }
                /* Check next TLV, i.e. current TLV pointer + length + 2 words
                 * (for current TLV's type and length)
                 */
                next_tlv = next_tlv + tlv_len + 2;
        }
        /* Module does not exist */
        return ICE_ERR_DOES_NOT_EXIST;
}
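
/* Illustrative usage sketch (not part of the upstream driver): locate a PFA
 * TLV and read its first data word. ICE_SR_BOOT_CFG_PTR is used only as an
 * example of a module type this file already consumes; a TLV's data words
 * start two words past the returned pointer (after the type and length words).
 */
static inline enum ice_status
ice_example_read_tlv_first_word(struct ice_hw *hw, u16 *word)
{
        u16 tlv, tlv_len;
        enum ice_status status;

        status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, ICE_SR_BOOT_CFG_PTR);
        if (status)
                return status;

        /* first data word follows the TLV type and length words */
        return ice_read_sr_word(hw, tlv + 2, word);
}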

/**
 * ice_read_pba_string - Reads part number string from NVM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the NVM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the NVM.
 */
enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
        u16 pba_tlv, pba_tlv_len;
        enum ice_status status;
        u16 pba_word, pba_size;
        u16 i;

        status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
                                        ICE_SR_PBA_BLOCK_PTR);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
                return status;
        }

        /* pba_size is the next word */
        status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
                return status;
        }

        if (pba_tlv_len < pba_size) {
                ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
                return ICE_ERR_INVAL_SIZE;
        }

        /* Subtract one to get PBA word count (PBA Size word is included in
         * total size)
         */
        pba_size--;
        if (pba_num_size < (((u32)pba_size * 2) + 1)) {
                ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
                return ICE_ERR_PARAM;
        }

        for (i = 0; i < pba_size; i++) {
                status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
                if (status) {
                        ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
                        return status;
                }

                pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
                pba_num[(i * 2) + 1] = pba_word & 0xFF;
        }
        pba_num[(pba_size * 2)] = '\0';

        return status;
}
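
/* Illustrative usage sketch (not part of the upstream driver): fetch and log
 * the part number string. The 32-byte buffer is an arbitrary example size;
 * ice_read_pba_string() returns ICE_ERR_PARAM if the buffer cannot hold the
 * string plus its NUL terminator.
 */
static inline void ice_example_log_pba(struct ice_hw *hw)
{
        u8 pba[32];

        if (!ice_read_pba_string(hw, pba, sizeof(pba)))
                ice_debug(hw, ICE_DBG_INIT, "PBA: %s\n", pba);
}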

/**
 * ice_get_orom_ver_info - Read Option ROM version information
 * @hw: pointer to the HW struct
 *
 * Read the Combo Image version data from the Boot Configuration TLV and fill
 * in the option ROM version data.
 */
static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
{
        u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
        struct ice_orom_info *orom = &hw->nvm.orom;
        enum ice_status status;
        u32 combo_ver;

        status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
                                        ICE_SR_BOOT_CFG_PTR);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT,
                          "Failed to read Boot Configuration Block TLV.\n");
                return status;
        }

        /* Boot Configuration Block must have length at least 2 words
         * (Combo Image Version High and Combo Image Version Low)
         */
        if (boot_cfg_tlv_len < 2) {
                ice_debug(hw, ICE_DBG_INIT,
                          "Invalid Boot Configuration Block TLV size.\n");
                return ICE_ERR_INVAL_SIZE;
        }

        status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
                                  &combo_hi);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
                return status;
        }

        status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
                                  &combo_lo);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
                return status;
        }

        combo_ver = ((u32)combo_hi << 16) | combo_lo;

        orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
                           ICE_OROM_VER_SHIFT);
        orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
        orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
                            ICE_OROM_VER_BUILD_SHIFT);

        return 0;
}

/**
 * ice_get_netlist_ver_info
 * @hw: pointer to the HW struct
 *
 * Get the netlist version information
 */
static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
{
        struct ice_netlist_ver_info *ver = &hw->netlist_ver;
        enum ice_status ret;
        u32 id_blk_start;
        __le16 raw_data;
        u16 data, i;
        u16 *buff;

        ret = ice_acquire_nvm(hw, ICE_RES_READ);
        if (ret)
                return ret;
        buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
                       GFP_KERNEL);
        if (!buff) {
                ret = ICE_ERR_NO_MEMORY;
                goto exit_no_mem;
        }

        /* read module length */
        ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
                              ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
                              ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
                              false, false, NULL);
        if (ret)
                goto exit_error;

        data = le16_to_cpu(raw_data);
        /* exit if length is 0 */
        if (!data)
                goto exit_error;

        /* read node count */
        ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
                              ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
                              ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
                              false, false, NULL);
        if (ret)
                goto exit_error;
        data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;

        /* netlist ID block starts from offset 4 + node count * 2 */
        id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;

        /* read the entire netlist ID block */
        ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
                              id_blk_start * 2,
                              ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
                              false, NULL);
        if (ret)
                goto exit_error;

        for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
                buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);

        ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
                buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
        ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
                buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
        ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
                buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
        ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
                buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
        ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
        /* Read the leftmost 4 bytes of SHA */
        ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
                buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];

exit_error:
        kfree(buff);
exit_no_mem:
        ice_release_nvm(hw);
        return ret;
}

/**
 * ice_discover_flash_size - Discover the available flash size.
 * @hw: pointer to the HW struct
 *
 * The device flash could be up to 16MB in size. However, it is possible that
 * the actual size is smaller. Use bisection to determine the accessible size
 * of flash memory.
 */
static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
{
        u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
        enum ice_status status;

        status = ice_acquire_nvm(hw, ICE_RES_READ);
        if (status)
                return status;

        while ((max_size - min_size) > 1) {
                u32 offset = (max_size + min_size) / 2;
                u32 len = 1;
                u8 data;

                status = ice_read_flat_nvm(hw, offset, &len, &data, false);
                if (status == ICE_ERR_AQ_ERROR &&
                    hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
                        ice_debug(hw, ICE_DBG_NVM,
                                  "%s: New upper bound of %u bytes\n",
                                  __func__, offset);
                        status = 0;
                        max_size = offset;
                } else if (!status) {
                        ice_debug(hw, ICE_DBG_NVM,
                                  "%s: New lower bound of %u bytes\n",
                                  __func__, offset);
                        min_size = offset;
                } else {
                        /* an unexpected error occurred */
                        goto err_read_flat_nvm;
                }
        }

        ice_debug(hw, ICE_DBG_NVM,
                  "Predicted flash size is %u bytes\n", max_size);

        hw->nvm.flash_size = max_size;

err_read_flat_nvm:
        ice_release_nvm(hw);

        return status;
}

/**
 * ice_init_nvm - initializes NVM settings
 * @hw: pointer to the HW struct
 *
 * This function reads and populates NVM settings such as Shadow RAM size,
 * version, EETRACK ID, and blank_nvm_mode
 */
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
        struct ice_nvm_info *nvm = &hw->nvm;
        u16 eetrack_lo, eetrack_hi, ver;
        enum ice_status status;
        u32 fla, gens_stat;
        u8 sr_size;

        /* The SR size is stored regardless of the NVM programming mode
         * as the blank mode may be used in the factory line.
         */
        gens_stat = rd32(hw, GLNVM_GENS);
        sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;

        /* Switching to words (sr_size contains power of 2) */
        nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;

        /* Check if we are in the normal or blank NVM programming mode */
        fla = rd32(hw, GLNVM_FLA);
        if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
                nvm->blank_nvm_mode = false;
        } else {
                /* Blank programming mode */
                nvm->blank_nvm_mode = true;
                ice_debug(hw, ICE_DBG_NVM,
                          "NVM init error: unsupported blank mode.\n");
                return ICE_ERR_NVM_BLANK_MODE;
        }

        status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT,
                          "Failed to read DEV starter version.\n");
                return status;
        }
        nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
        nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;

        status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
                return status;
        }
        status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
                return status;
        }

        nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;

        status = ice_discover_flash_size(hw);
        if (status) {
                ice_debug(hw, ICE_DBG_NVM,
                          "NVM init error: failed to discover flash size.\n");
                return status;
        }

        switch (hw->device_id) {
        /* the following devices do not have boot_cfg_tlv yet */
        case ICE_DEV_ID_E823C_BACKPLANE:
        case ICE_DEV_ID_E823C_QSFP:
        case ICE_DEV_ID_E823C_SFP:
        case ICE_DEV_ID_E823C_10G_BASE_T:
        case ICE_DEV_ID_E823C_SGMII:
        case ICE_DEV_ID_E822C_BACKPLANE:
        case ICE_DEV_ID_E822C_QSFP:
        case ICE_DEV_ID_E822C_10G_BASE_T:
        case ICE_DEV_ID_E822C_SGMII:
        case ICE_DEV_ID_E822C_SFP:
        case ICE_DEV_ID_E822L_BACKPLANE:
        case ICE_DEV_ID_E822L_SFP:
        case ICE_DEV_ID_E822L_10G_BASE_T:
        case ICE_DEV_ID_E822L_SGMII:
        case ICE_DEV_ID_E823L_BACKPLANE:
        case ICE_DEV_ID_E823L_SFP:
        case ICE_DEV_ID_E823L_10G_BASE_T:
        case ICE_DEV_ID_E823L_1GBE:
        case ICE_DEV_ID_E823L_QSFP:
                return status;
        default:
                break;
        }

        status = ice_get_orom_ver_info(hw);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
                return status;
        }

        /* read the netlist version information */
        status = ice_get_netlist_ver_info(hw);
        if (status)
                ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");

        return 0;
}

/**
 * ice_nvm_validate_checksum
 * @hw: pointer to the HW struct
 *
 * Verify NVM PFA checksum validity (0x0706)
 */
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
{
        struct ice_aqc_nvm_checksum *cmd;
        struct ice_aq_desc desc;
        enum ice_status status;

        status = ice_acquire_nvm(hw, ICE_RES_READ);
        if (status)
                return status;

        cmd = &desc.params.nvm_checksum;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
        cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;

        status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
        ice_release_nvm(hw);

        if (!status)
                if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
                        status = ICE_ERR_NVM_CHECKSUM;

        return status;
}
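
/* Illustrative usage sketch (not part of the upstream driver): verify the
 * NVM PFA checksum and only log on failure. Whether a bad checksum should be
 * treated as fatal is left to the caller.
 */
static inline void ice_example_verify_nvm_checksum(struct ice_hw *hw)
{
        if (ice_nvm_validate_checksum(hw))
                ice_debug(hw, ICE_DBG_NVM, "NVM PFA checksum is invalid\n");
}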

/**
 * ice_nvm_write_activate
 * @hw: pointer to the HW struct
 * @cmd_flags: NVM activate admin command bits (banks to be validated)
 *
 * Updates the control word with the required banks' validity bits
 * and dumps the Shadow RAM to flash (0x0707)
 */
enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
{
        struct ice_aqc_nvm *cmd;
        struct ice_aq_desc desc;

        cmd = &desc.params.nvm;
        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);

        cmd->cmd_flags = cmd_flags;

        return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
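
/* Illustrative sketch (not part of the upstream driver) of how the erase,
 * write and activate commands above fit together for a single module written
 * in one shot. The module ID, offset and activate flags are placeholders; a
 * real update flow also checks the AQ response codes and waits for the
 * corresponding completion events from firmware.
 */
static inline enum ice_status
ice_example_update_module(struct ice_hw *hw, u16 module, u8 *buf, u16 len)
{
        enum ice_status status;

        status = ice_acquire_nvm(hw, ICE_RES_WRITE);
        if (status)
                return status;

        /* erase the target module, then write the new contents */
        status = ice_aq_erase_nvm(hw, module, NULL);
        if (!status)
                status = ice_aq_update_nvm(hw, module, 0, len, buf,
                                           true, 0, NULL);

        ice_release_nvm(hw);
        if (status)
                return status;

        /* request that firmware switch to the newly written contents */
        return ice_nvm_write_activate(hw, 0);
}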

/**
 * ice_aq_nvm_update_empr
 * @hw: pointer to the HW struct
 *
 * Update empr (0x0709). This command allows SW to
 * request an EMPR to activate new FW.
 */
enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
{
        struct ice_aq_desc desc;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr);

        return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_nvm_set_pkg_data
 * @hw: pointer to the HW struct
 * @del_pkg_data_flag: if set, the package data currently stored by FW is
 *                     deleted; when this flag is set, the buffer should have
 *                     size 0
 * @data: pointer to buffer
 * @length: length of the buffer
 * @cd: pointer to command details structure or NULL
 *
 * Set package data (0x070A). This command is equivalent to the reception
 * of a PLDM FW Update GetPackageData cmd. This command should be sent
 * as part of the NVM update as the first cmd in the flow.
 */
enum ice_status
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
                     u16 length, struct ice_sq_cd *cd)
{
        struct ice_aqc_nvm_pkg_data *cmd;
        struct ice_aq_desc desc;

        if (length != 0 && !data)
                return ICE_ERR_PARAM;

        cmd = &desc.params.pkg_data;

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data);
        desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

        if (del_pkg_data_flag)
                cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE;

        return ice_aq_send_cmd(hw, &desc, data, length, cd);
}

/**
 * ice_nvm_pass_component_tbl
 * @hw: pointer to the HW struct
 * @data: pointer to buffer
 * @length: length of the buffer
 * @transfer_flag: parameter for determining stage of the update
 * @comp_response: a pointer to the response from the 0x070B AQC
 * @comp_response_code: a pointer to the response code from the 0x070B AQC
 * @cd: pointer to command details structure or NULL
 *
 * Pass component table (0x070B). This command is equivalent to the reception
 * of a PLDM FW Update PassComponentTable cmd. This command should be sent once
 * per component. It can only be sent after the Set Package Data cmd and before
 * the actual update. FW will assume these commands are going to be sent until
 * the TransferFlag is set to End or StartAndEnd.
 */
enum ice_status
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
                           u8 transfer_flag, u8 *comp_response,
                           u8 *comp_response_code, struct ice_sq_cd *cd)
{
        struct ice_aqc_nvm_pass_comp_tbl *cmd;
        struct ice_aq_desc desc;
        enum ice_status status;

        if (!data || !comp_response || !comp_response_code)
                return ICE_ERR_PARAM;

        cmd = &desc.params.pass_comp_tbl;

        ice_fill_dflt_direct_cmd_desc(&desc,
                                      ice_aqc_opc_nvm_pass_component_tbl);
        desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

        cmd->transfer_flag = transfer_flag;
        status = ice_aq_send_cmd(hw, &desc, data, length, cd);

        if (!status) {
                *comp_response = cmd->component_response;
                *comp_response_code = cmd->component_response_code;
        }
        return status;
}
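
/* Illustrative sketch (not part of the upstream driver) of the command
 * ordering described above: Set Package Data is sent first, followed by one
 * Pass Component Table per component. The buffers, lengths and transfer_flag
 * come from the PLDM image being flashed and are assumptions of this example.
 */
static inline enum ice_status
ice_example_announce_component(struct ice_hw *hw, u8 *pkg_data, u16 pkg_len,
                               u8 *comp_tbl, u16 comp_len, u8 transfer_flag)
{
        u8 comp_response, comp_response_code;
        enum ice_status status;

        /* step 1: hand the GetPackageData response to firmware */
        status = ice_nvm_set_pkg_data(hw, false, pkg_data, pkg_len, NULL);
        if (status)
                return status;

        /* step 2: announce the component; firmware reports whether it will
         * accept it via comp_response and comp_response_code
         */
        return ice_nvm_pass_component_tbl(hw, comp_tbl, comp_len, transfer_flag,
                                          &comp_response, &comp_response_code,
                                          NULL);
}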