#include "ice_common.h"
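
/**
 * ice_aq_read_nvm - read the NVM using the admin queue (0x0701)
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be read (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @read_shadow_ram: tell if this is a Shadow RAM read
 * @cd: pointer to command details structure or NULL
 *
 * Read a section of the NVM via an AdminQ NVM Read command.
 */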
static enum ice_status
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
		void *data, bool last_command, bool read_shadow_ram,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	cmd = &desc.params.nvm;

	if (offset > ICE_AQC_NVM_MAX_OFFSET)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);

	if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
		cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
	cmd->module_typeid = cpu_to_le16(module_typeid);
	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
	cmd->length = cpu_to_le16(length);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}
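
/**
 * ice_read_flat_nvm - Read a portion of the NVM as a flat memory space
 * @hw: pointer to the HW struct
 * @offset: offset from the beginning of the NVM
 * @length: (in) number of bytes to read; (out) number of bytes actually read
 * @data: buffer to return the data in (sized to fit the specified length)
 * @read_shadow_ram: if true, read from the Shadow RAM instead of the flash
 *
 * Breaks the request into reads that do not exceed the 4Kb AdminQ buffer and
 * that do not cross a Shadow RAM sector boundary. Note that @data may be
 * partially written if some reads succeed before a failure.
 */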
enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
		  bool read_shadow_ram)
{
	enum ice_status status;
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;

	*length = 0;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
		ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n");
		return ICE_ERR_PARAM;
	}

	do {
		u32 read_size, sector_offset;

		/* ice_aq_read_nvm cannot read more than 4Kb at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also
		 * 4Kb.
		 */
		sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
		read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
				  inlen - bytes_read);

		last_cmd = !(bytes_read + read_size < inlen);

		status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
					 offset, read_size,
					 data + bytes_read, last_cmd,
					 read_shadow_ram, NULL);
		if (status)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	*length = bytes_read;
	return status;
}
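
/**
 * ice_aq_update_nvm - update the NVM using the admin queue (0x0703)
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @offset: byte offset from the module beginning
 * @length: length of the section to be written (in bytes from the offset)
 * @data: command buffer (size [bytes] = length)
 * @last_command: tells if this is the last command in a series
 * @command_flags: command parameters
 * @cd: pointer to command details structure or NULL
 *
 * Write a section of the NVM via an AdminQ NVM Write command.
 */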
enum ice_status
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
		  u16 length, void *data, bool last_command, u8 command_flags,
		  struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	cmd = &desc.params.nvm;

	/* In offset the highest byte must be zeroed. */
	if (offset & 0xFF000000)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);

	cmd->cmd_flags |= command_flags;

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
	cmd->module_typeid = cpu_to_le16(module_typeid);
	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
	cmd->length = cpu_to_le16(length);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}
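
/**
 * ice_aq_erase_nvm - erase an NVM module using the admin queue (0x0702)
 * @hw: pointer to the HW struct
 * @module_typeid: module pointer location in words from the NVM beginning
 * @cd: pointer to command details structure or NULL
 *
 * Erase the NVM sector identified by @module_typeid.
 */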
enum ice_status
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_nvm *cmd;

	cmd = &desc.params.nvm;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);

	cmd->module_typeid = cpu_to_le16(module_typeid);
	cmd->length = cpu_to_le16(ICE_AQC_NVM_ERASE_LEN);
	cmd->offset_low = 0;
	cmd->offset_high = 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
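
/**
 * ice_read_sr_word_aq - Reads one Shadow RAM word via the AdminQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (in 16 bit words)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
 */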
static enum ice_status
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
	u32 bytes = sizeof(u16);
	enum ice_status status;
	__le16 data_local;

	/* Note that ice_read_flat_nvm checks that the read stays within the
	 * Shadow RAM size and does not cross a Shadow RAM sector boundary.
	 */
	status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
				   (__force u8 *)&data_local, true);
	if (status)
		return status;

	*data = le16_to_cpu(data_local);
	return 0;
}
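
/**
 * ice_acquire_nvm - Request NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * Acquires the NVM resource. Skipped when the device is in blank NVM
 * programming mode.
 */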
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	if (hw->nvm.blank_nvm_mode)
		return 0;

	return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}
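
/**
 * ice_release_nvm - Release NVM ownership
 * @hw: pointer to the HW structure
 *
 * Releases the NVM resource acquired by ice_acquire_nvm.
 */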
void ice_release_nvm(struct ice_hw *hw)
{
	if (hw->nvm.blank_nvm_mode)
		return;

	ice_release_res(hw, ICE_NVM_RES_ID);
}
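
/**
 * ice_read_sr_word - Reads a Shadow RAM word, acquiring the NVM if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (in 16 bit words)
 * @data: word read from the Shadow RAM
 *
 * Wrapper around ice_read_sr_word_aq that takes and releases NVM ownership
 * for the duration of the read.
 */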
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (!status) {
		status = ice_read_sr_word_aq(hw, offset, data);
		ice_release_nvm(hw);
	}

	return status;
}
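
/**
 * ice_get_pfa_module_tlv - Reads a sub-module TLV from the NVM PFA
 * @hw: pointer to the hardware structure
 * @module_tlv: pointer to the module TLV to return
 * @module_tlv_len: pointer to the module TLV length to return
 * @module_type: module type requested
 *
 * Finds the requested sub-module TLV type in the Preserved Field Array (PFA)
 * and returns the TLV pointer and length. The caller can use these to read
 * the variable length TLV value.
 */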
enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
		       u16 module_type)
{
	enum ice_status status;
	u16 pfa_len, pfa_ptr;
	u16 next_tlv;

	status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n");
		return status;
	}
	status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
		return status;
	}

	/* Starting with first TLV after PFA length, iterate through the list
	 * of TLVs to find the requested one.
	 */
	next_tlv = pfa_ptr + 1;
	while (next_tlv < pfa_ptr + pfa_len) {
		u16 tlv_sub_module_type;
		u16 tlv_len;

		/* Read TLV type */
		status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
			break;
		}
		/* Read TLV length */
		status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
			break;
		}
		if (tlv_sub_module_type == module_type) {
			if (tlv_len) {
				*module_tlv = next_tlv;
				*module_tlv_len = tlv_len;
				return 0;
			}
			return ICE_ERR_INVAL_SIZE;
		}
		/* Check next TLV, i.e. current TLV pointer + length + 2 words
		 * (for current TLV's type and length)
		 */
		next_tlv = next_tlv + tlv_len + 2;
	}

	/* Module does not exist */
	return ICE_ERR_DOES_NOT_EXIST;
}
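
/**
 * ice_read_pba_string - Reads the Product Board Assembly string from the NVM
 * @hw: pointer to the hardware structure
 * @pba_num: buffer that stores the part number string read from the NVM
 * @pba_num_size: size of the @pba_num buffer, in bytes
 *
 * Reads the PBA Block TLV and copies its contents into @pba_num as a
 * NUL-terminated string.
 */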
enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
	u16 pba_tlv, pba_tlv_len;
	enum ice_status status;
	u16 pba_word, pba_size;
	u16 i;

	status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
					ICE_SR_PBA_BLOCK_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
		return status;
	}

	/* pba_size is the next word */
	status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
		return status;
	}

	if (pba_tlv_len < pba_size) {
		ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
		return ICE_ERR_INVAL_SIZE;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * PBA Size)
	 */
	pba_size--;
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
		return ICE_ERR_PARAM;
	}

	for (i = 0; i < pba_size; i++) {
		status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}
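
/**
 * ice_get_orom_ver_info - Read Option ROM version information
 * @hw: pointer to the HW struct
 *
 * Read the Combo Image version data from the Boot Configuration TLV and fill
 * in the Option ROM version data in hw->nvm.orom.
 */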
static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
{
	u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
	struct ice_orom_info *orom = &hw->nvm.orom;
	enum ice_status status;
	u32 combo_ver;

	status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
					ICE_SR_BOOT_CFG_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read Boot Configuration Block TLV.\n");
		return status;
	}

	/* Boot Configuration Block must have length at least 2 words
	 * (Combo Image Version High and Combo Image Version Low)
	 */
	if (boot_cfg_tlv_len < 2) {
		ice_debug(hw, ICE_DBG_INIT, "Invalid Boot Configuration Block TLV size.\n");
		return ICE_ERR_INVAL_SIZE;
	}

	status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
				  &combo_hi);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
		return status;
	}

	status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
				  &combo_lo);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
		return status;
	}

	combo_ver = ((u32)combo_hi << 16) | combo_lo;

	orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
			   ICE_OROM_VER_SHIFT);
	orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
	orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
			    ICE_OROM_VER_BUILD_SHIFT);

	return 0;
}
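
/**
 * ice_get_netlist_ver_info - Read netlist version information
 * @hw: pointer to the HW struct
 *
 * Reads the netlist ID block from the link topology netlist module and fills
 * in the version, type, revision, customer version and hash fields of
 * hw->netlist_ver.
 */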
static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
{
	struct ice_netlist_ver_info *ver = &hw->netlist_ver;
	enum ice_status ret;
	u32 id_blk_start;
	__le16 raw_data;
	u16 data, i;
	u16 *buff;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;
	buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
		       GFP_KERNEL);
	if (!buff) {
		ret = ICE_ERR_NO_MEMORY;
		goto exit_no_mem;
	}

	/* read netlist module length */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;

	data = le16_to_cpu(raw_data);
	/* exit if the netlist length is zero */
	if (!data)
		goto exit_error;

	/* read node count */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;
	data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;

	/* the ID block starts at the fixed offset plus two words per node */
	id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;

	/* read the entire netlist ID block */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      id_blk_start * 2,
			      ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
			      false, NULL);
	if (ret)
		goto exit_error;

	for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
		buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);

	ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
		     buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
	ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
		     buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
	ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
		    buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
	ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
		   buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
	ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];

	/* the hash is built from words 14 and 15 of the SHA block */
	ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
		    buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];

exit_error:
	kfree(buff);
exit_no_mem:
	ice_release_nvm(hw);
	return ret;
}
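
/**
 * ice_discover_flash_size - Discover the available flash size
 * @hw: pointer to the HW struct
 *
 * The actual flash may be smaller than the maximum addressable range. Use a
 * binary search over flat NVM reads, treating an EINVAL AdminQ response as
 * "offset past end of flash", to determine the accessible size.
 */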
static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
{
	u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	while ((max_size - min_size) > 1) {
		u32 offset = (max_size + min_size) / 2;
		u32 len = 1;
		u8 data;

		status = ice_read_flat_nvm(hw, offset, &len, &data, false);
		if (status == ICE_ERR_AQ_ERROR &&
		    hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
			ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
				  __func__, offset);
			status = 0;
			max_size = offset;
		} else if (!status) {
			ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n",
				  __func__, offset);
			min_size = offset;
		} else {
			/* an unexpected error occurred */
			goto err_read_flat_nvm;
		}
	}

	ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size);

	hw->nvm.flash_size = max_size;

err_read_flat_nvm:
	ice_release_nvm(hw);

	return status;
}
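
/**
 * ice_init_nvm - Initialize NVM related settings
 * @hw: pointer to the HW struct
 *
 * Reads and populates NVM settings such as the Shadow RAM size, the NVM and
 * EETRACK versions, the flash size, and the Option ROM and netlist version
 * information. Fails if the device is in blank NVM programming mode.
 */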
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
	struct ice_nvm_info *nvm = &hw->nvm;
	u16 eetrack_lo, eetrack_hi, ver;
	enum ice_status status;
	u32 fla, gens_stat;
	u8 sr_size;

	/* The SR size is stored regardless of the NVM programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens_stat = rd32(hw, GLNVM_GENS);
	sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;

	/* Switching to words (sr_size contains power of 2) */
	nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, GLNVM_FLA);
	if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
		nvm->blank_nvm_mode = false;
	} else {
		/* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
		return ICE_ERR_NVM_BLANK_MODE;
	}

	status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n");
		return status;
	}
	nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;

	status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
		return status;
	}
	status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
		return status;
	}

	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;

	status = ice_discover_flash_size(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
		return status;
	}

	status = ice_get_orom_ver_info(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
		return status;
	}

	/* read the netlist version information */
	status = ice_get_netlist_ver_info(hw);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");

	return 0;
}
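
/**
 * ice_nvm_validate_checksum - Verify the NVM PFA checksum (0x0706)
 * @hw: pointer to the HW struct
 *
 * Requests a checksum verification via the AdminQ and returns
 * ICE_ERR_NVM_CHECKSUM if the reported checksum is not correct.
 */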
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
{
	struct ice_aqc_nvm_checksum *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	cmd = &desc.params.nvm_checksum;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
	cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	ice_release_nvm(hw);

	if (!status)
		if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
			status = ICE_ERR_NVM_CHECKSUM;

	return status;
}
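
/**
 * ice_nvm_write_activate - NVM Write Activate admin command
 * @hw: pointer to the HW struct
 * @cmd_flags: flags selecting which flash banks to activate/validate
 *
 * Sends the NVM Write Activate command with the given command flags.
 */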
enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
{
	struct ice_aqc_nvm *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.nvm;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);

	cmd->cmd_flags = cmd_flags;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
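
/**
 * ice_aq_nvm_update_empr - Request an EMP reset after an NVM update
 * @hw: pointer to the HW struct
 *
 * Sends the NVM Update EMPR admin command.
 */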
enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
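
/**
 * ice_nvm_set_pkg_data - Set or delete the NVM package data
 * @hw: pointer to the HW struct
 * @del_pkg_data_flag: if true, delete the currently stored package data
 * @data: pointer to the package data buffer
 * @length: length of the package data buffer
 * @cd: pointer to command details structure or NULL
 *
 * Sends the package data (or a delete request) to firmware as part of the
 * NVM update flow.
 */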
enum ice_status
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
		     u16 length, struct ice_sq_cd *cd)
{
	struct ice_aqc_nvm_pkg_data *cmd;
	struct ice_aq_desc desc;

	if (length != 0 && !data)
		return ICE_ERR_PARAM;

	cmd = &desc.params.pkg_data;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (del_pkg_data_flag)
		cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE;

	return ice_aq_send_cmd(hw, &desc, data, length, cd);
}
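
/**
 * ice_nvm_pass_component_tbl - Pass a component table to firmware
 * @hw: pointer to the HW struct
 * @data: pointer to the component table buffer
 * @length: length of the component table buffer
 * @transfer_flag: parameter indicating the stage of the update
 * @comp_response: firmware's response for the passed component
 * @comp_response_code: firmware's response code for the passed component
 * @cd: pointer to command details structure or NULL
 *
 * Sends one component table entry to firmware during the NVM update flow and
 * returns the component response and response code reported by firmware.
 */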
enum ice_status
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
			   u8 transfer_flag, u8 *comp_response,
			   u8 *comp_response_code, struct ice_sq_cd *cd)
{
	struct ice_aqc_nvm_pass_comp_tbl *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!data || !comp_response || !comp_response_code)
		return ICE_ERR_PARAM;

	cmd = &desc.params.pass_comp_tbl;

	ice_fill_dflt_direct_cmd_desc(&desc,
				      ice_aqc_opc_nvm_pass_component_tbl);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->transfer_flag = transfer_flag;
	status = ice_aq_send_cmd(hw, &desc, data, length, cd);

	if (!status) {
		*comp_response = cmd->component_response;
		*comp_response_code = cmd->component_response_code;
	}
	return status;
}