1
2
3
4#include "ice_common.h"
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19static enum ice_status
20ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
21 void *data, bool last_command, bool read_shadow_ram,
22 struct ice_sq_cd *cd)
23{
24 struct ice_aq_desc desc;
25 struct ice_aqc_nvm *cmd;
26
27 cmd = &desc.params.nvm;
28
29 if (offset > ICE_AQC_NVM_MAX_OFFSET)
30 return ICE_ERR_PARAM;
31
32 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
33
34 if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
35 cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;
36
37
38 if (last_command)
39 cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
40 cmd->module_typeid = cpu_to_le16(module_typeid);
41 cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
42 cmd->offset_high = (offset >> 16) & 0xFF;
43 cmd->length = cpu_to_le16(length);
44
45 return ice_aq_send_cmd(hw, &desc, data, length, cd);
46}
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
/**
 * ice_read_flat_nvm - Read a portion of NVM as a flat memory space
 * @hw: pointer to the HW struct
 * @offset: byte offset from the beginning of the NVM/Shadow RAM
 * @length: on input, number of bytes to read; on output, bytes actually read
 * @data: caller-supplied buffer of at least *length bytes
 * @read_shadow_ram: if true, read from the Shadow RAM instead of flat flash
 *
 * Reads the requested range using one or more NVM Read AQ commands. Each
 * command is clamped so it never crosses an ICE_AQ_MAX_BUF_LEN-aligned
 * boundary. Returns an ice_status error code; on failure *length reports
 * how many bytes were successfully read before the error.
 */
enum ice_status
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
		  bool read_shadow_ram)
{
	enum ice_status status;
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;

	/* report zero bytes read until the loop below makes progress */
	*length = 0;

	/* Shadow RAM reads must stay within the Shadow RAM size (in bytes) */
	if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM error: requested offset is beyond Shadow RAM limit\n");
		return ICE_ERR_PARAM;
	}

	do {
		u32 read_size, sector_offset;

		/* Clamp each AQ read so it neither exceeds the remaining
		 * request nor crosses the next ICE_AQ_MAX_BUF_LEN-aligned
		 * boundary from the current offset.
		 */
		sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
		read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
				  inlen - bytes_read);

		/* flag the final AQ command of the overall request */
		last_cmd = !(bytes_read + read_size < inlen);

		status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
					 offset, read_size,
					 data + bytes_read, last_cmd,
					 read_shadow_ram, NULL);
		if (status)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	/* report how many bytes were actually read */
	*length = bytes_read;
	return status;
}
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123enum ice_status
124ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
125 u16 length, void *data, bool last_command, u8 command_flags,
126 struct ice_sq_cd *cd)
127{
128 struct ice_aq_desc desc;
129 struct ice_aqc_nvm *cmd;
130
131 cmd = &desc.params.nvm;
132
133
134 if (offset & 0xFF000000)
135 return ICE_ERR_PARAM;
136
137 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
138
139 cmd->cmd_flags |= command_flags;
140
141
142 if (last_command)
143 cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
144 cmd->module_typeid = cpu_to_le16(module_typeid);
145 cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
146 cmd->offset_high = (offset >> 16) & 0xFF;
147 cmd->length = cpu_to_le16(length);
148
149 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
150
151 return ice_aq_send_cmd(hw, &desc, data, length, cd);
152}
153
154
155
156
157
158
159
160
161
162enum ice_status
163ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
164{
165 struct ice_aq_desc desc;
166 struct ice_aqc_nvm *cmd;
167
168 cmd = &desc.params.nvm;
169
170 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);
171
172 cmd->module_typeid = cpu_to_le16(module_typeid);
173 cmd->length = cpu_to_le16(ICE_AQC_NVM_ERASE_LEN);
174 cmd->offset_low = 0;
175 cmd->offset_high = 0;
176
177 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
178}
179
180
181
182
183
184
185
186
187
188static enum ice_status
189ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
190{
191 u32 bytes = sizeof(u16);
192 enum ice_status status;
193 __le16 data_local;
194
195
196
197
198 status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
199 (u8 *)&data_local, true);
200 if (status)
201 return status;
202
203 *data = le16_to_cpu(data_local);
204 return 0;
205}
206
207
208
209
210
211
212
213
214enum ice_status
215ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
216{
217 if (hw->nvm.blank_nvm_mode)
218 return 0;
219
220 return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
221}
222
223
224
225
226
227
228
229void ice_release_nvm(struct ice_hw *hw)
230{
231 if (hw->nvm.blank_nvm_mode)
232 return;
233
234 ice_release_res(hw, ICE_NVM_RES_ID);
235}
236
237
238
239
240
241
242
243
244
245enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
246{
247 enum ice_status status;
248
249 status = ice_acquire_nvm(hw, ICE_RES_READ);
250 if (!status) {
251 status = ice_read_sr_word_aq(hw, offset, data);
252 ice_release_nvm(hw);
253 }
254
255 return status;
256}
257
258
259
260
261
262
263
264
265
266
267
268
269enum ice_status
270ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
271 u16 module_type)
272{
273 enum ice_status status;
274 u16 pfa_len, pfa_ptr;
275 u16 next_tlv;
276
277 status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
278 if (status) {
279 ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
280 return status;
281 }
282 status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
283 if (status) {
284 ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
285 return status;
286 }
287
288
289
290 next_tlv = pfa_ptr + 1;
291 while (next_tlv < pfa_ptr + pfa_len) {
292 u16 tlv_sub_module_type;
293 u16 tlv_len;
294
295
296 status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
297 if (status) {
298 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
299 break;
300 }
301
302 status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
303 if (status) {
304 ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
305 break;
306 }
307 if (tlv_sub_module_type == module_type) {
308 if (tlv_len) {
309 *module_tlv = next_tlv;
310 *module_tlv_len = tlv_len;
311 return 0;
312 }
313 return ICE_ERR_INVAL_SIZE;
314 }
315
316
317
318 next_tlv = next_tlv + tlv_len + 2;
319 }
320
321 return ICE_ERR_DOES_NOT_EXIST;
322}
323
324
325
326
327
328
329
330
331
332enum ice_status
333ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
334{
335 u16 pba_tlv, pba_tlv_len;
336 enum ice_status status;
337 u16 pba_word, pba_size;
338 u16 i;
339
340 status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
341 ICE_SR_PBA_BLOCK_PTR);
342 if (status) {
343 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
344 return status;
345 }
346
347
348 status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
349 if (status) {
350 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
351 return status;
352 }
353
354 if (pba_tlv_len < pba_size) {
355 ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
356 return ICE_ERR_INVAL_SIZE;
357 }
358
359
360
361
362 pba_size--;
363 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
364 ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
365 return ICE_ERR_PARAM;
366 }
367
368 for (i = 0; i < pba_size; i++) {
369 status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
370 if (status) {
371 ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
372 return status;
373 }
374
375 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
376 pba_num[(i * 2) + 1] = pba_word & 0xFF;
377 }
378 pba_num[(pba_size * 2)] = '\0';
379
380 return status;
381}
382
383
384
385
386
387
388
389
390static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
391{
392 u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
393 struct ice_orom_info *orom = &hw->nvm.orom;
394 enum ice_status status;
395 u32 combo_ver;
396
397 status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
398 ICE_SR_BOOT_CFG_PTR);
399 if (status) {
400 ice_debug(hw, ICE_DBG_INIT,
401 "Failed to read Boot Configuration Block TLV.\n");
402 return status;
403 }
404
405
406
407
408 if (boot_cfg_tlv_len < 2) {
409 ice_debug(hw, ICE_DBG_INIT,
410 "Invalid Boot Configuration Block TLV size.\n");
411 return ICE_ERR_INVAL_SIZE;
412 }
413
414 status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
415 &combo_hi);
416 if (status) {
417 ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
418 return status;
419 }
420
421 status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
422 &combo_lo);
423 if (status) {
424 ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
425 return status;
426 }
427
428 combo_ver = ((u32)combo_hi << 16) | combo_lo;
429
430 orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
431 ICE_OROM_VER_SHIFT);
432 orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
433 orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
434 ICE_OROM_VER_BUILD_SHIFT);
435
436 return 0;
437}
438
439
440
441
442
443
444
/**
 * ice_get_netlist_ver_info - Read netlist version information
 * @hw: pointer to the HW struct
 *
 * Reads the netlist version ID block out of the link-topology netlist NVM
 * module and populates hw->netlist_ver. Holds the NVM ownership resource
 * for the duration of the reads.
 */
static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
{
	struct ice_netlist_ver_info *ver = &hw->netlist_ver;
	enum ice_status ret;
	u32 id_blk_start;
	__le16 raw_data;
	u16 data, i;
	u16 *buff;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;
	buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
		       GFP_KERNEL);
	if (!buff) {
		ret = ICE_ERR_NO_MEMORY;
		goto exit_no_mem;
	}

	/* read the netlist module length word */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;

	data = le16_to_cpu(raw_data);

	/* NOTE(review): ret is 0 here, so an empty netlist module returns
	 * success without populating *ver — confirm this is intended.
	 */
	if (!data)
		goto exit_error;

	/* read the node count word */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;
	data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;

	/* the ID block follows the node list; compute its word offset */
	id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;

	/* read the entire version ID block (byte offset = word offset * 2) */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      id_blk_start * 2,
			      ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
			      false, NULL);
	if (ret)
		goto exit_error;

	/* convert the raw little-endian words in place to CPU order */
	for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
		buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);

	/* each 32-bit field spans two words: HIGH in the upper 16 bits */
	ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
	ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
	ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
	ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
	ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];

	/* only words 14 and 15 of the SHA hash are kept */
	ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
		buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];

exit_error:
	kfree(buff);
exit_no_mem:
	ice_release_nvm(hw);
	return ret;
}
519
520
521
522
523
524
525
526
527
/**
 * ice_discover_flash_size - Discover the available flash size
 * @hw: pointer to the HW struct
 *
 * Binary-searches for the highest readable flash offset, bounded above by
 * the 24-bit AQ offset limit (ICE_AQC_NVM_MAX_OFFSET). A single-byte read
 * that fails with ICE_AQ_RC_EINVAL is treated as "past the end of flash";
 * the converged upper bound is stored in hw->nvm.flash_size.
 */
static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
{
	u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
	enum ice_status status;

	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	/* invariant: offsets <= min_size read OK, offsets >= max_size don't */
	while ((max_size - min_size) > 1) {
		u32 offset = (max_size + min_size) / 2;
		u32 len = 1;
		u8 data;

		status = ice_read_flat_nvm(hw, offset, &len, &data, false);
		if (status == ICE_ERR_AQ_ERROR &&
		    hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
			/* firmware EINVAL: offset is beyond the flash end */
			ice_debug(hw, ICE_DBG_NVM,
				  "%s: New upper bound of %u bytes\n",
				  __func__, offset);
			status = 0;
			max_size = offset;
		} else if (!status) {
			ice_debug(hw, ICE_DBG_NVM,
				  "%s: New lower bound of %u bytes\n",
				  __func__, offset);
			min_size = offset;
		} else {
			/* any other read failure aborts the discovery */
			goto err_read_flat_nvm;
		}
	}

	ice_debug(hw, ICE_DBG_NVM,
		  "Predicted flash size is %u bytes\n", max_size);

	hw->nvm.flash_size = max_size;

err_read_flat_nvm:
	ice_release_nvm(hw);

	return status;
}
571
572
573
574
575
576
577
578
/**
 * ice_init_nvm - initializes NVM settings
 * @hw: pointer to the HW struct
 *
 * Reads and populates NVM settings: Shadow RAM size, programming mode,
 * device starter version, EETRACK id, flash size, and — for device IDs not
 * excluded by the switch below — Option ROM and netlist version info.
 */
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
	struct ice_nvm_info *nvm = &hw->nvm;
	u16 eetrack_lo, eetrack_hi, ver;
	enum ice_status status;
	u32 fla, gens_stat;
	u8 sr_size;

	/* read the Shadow RAM size field from the GLNVM_GENS register */
	gens_stat = rd32(hw, GLNVM_GENS);
	sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;

	/* sr_size is a power-of-two exponent in KB; convert to words */
	nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;

	/* the FLA lock bit distinguishes normal from blank programming mode */
	fla = rd32(hw, GLNVM_FLA);
	if (fla & GLNVM_FLA_LOCKED_M) {
		nvm->blank_nvm_mode = false;
	} else {
		/* blank programming mode is not supported at runtime */
		nvm->blank_nvm_mode = true;
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM init error: unsupported blank mode.\n");
		return ICE_ERR_NVM_BLANK_MODE;
	}

	status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read DEV starter version.\n");
		return status;
	}
	/* major/minor are packed into one Shadow RAM word */
	nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
	nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;

	status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
		return status;
	}
	status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
		return status;
	}

	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;

	status = ice_discover_flash_size(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM init error: failed to discover flash size.\n");
		return status;
	}

	/* For the device IDs below, skip the Option ROM and netlist version
	 * reads and return early (status is 0 at this point).
	 * NOTE(review): presumably these E822/E823 devices do not expose this
	 * information — confirm against the hardware documentation.
	 */
	switch (hw->device_id) {
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
		return status;
	default:
		break;
	}

	status = ice_get_orom_ver_info(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
		return status;
	}

	/* netlist version failure is logged but does not fail init */
	status = ice_get_netlist_ver_info(hw);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");

	return 0;
}
676
677
678
679
680
681
682
683enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
684{
685 struct ice_aqc_nvm_checksum *cmd;
686 struct ice_aq_desc desc;
687 enum ice_status status;
688
689 status = ice_acquire_nvm(hw, ICE_RES_READ);
690 if (status)
691 return status;
692
693 cmd = &desc.params.nvm_checksum;
694
695 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
696 cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
697
698 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
699 ice_release_nvm(hw);
700
701 if (!status)
702 if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
703 status = ICE_ERR_NVM_CHECKSUM;
704
705 return status;
706}
707
708
709
710
711
712
713
714
715
716enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
717{
718 struct ice_aqc_nvm *cmd;
719 struct ice_aq_desc desc;
720
721 cmd = &desc.params.nvm;
722 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
723
724 cmd->cmd_flags = cmd_flags;
725
726 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
727}
728
729
730
731
732
733
734
735
736enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
737{
738 struct ice_aq_desc desc;
739
740 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr);
741
742 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
743}
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759enum ice_status
760ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
761 u16 length, struct ice_sq_cd *cd)
762{
763 struct ice_aqc_nvm_pkg_data *cmd;
764 struct ice_aq_desc desc;
765
766 if (length != 0 && !data)
767 return ICE_ERR_PARAM;
768
769 cmd = &desc.params.pkg_data;
770
771 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data);
772 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
773
774 if (del_pkg_data_flag)
775 cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE;
776
777 return ice_aq_send_cmd(hw, &desc, data, length, cd);
778}
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796enum ice_status
797ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
798 u8 transfer_flag, u8 *comp_response,
799 u8 *comp_response_code, struct ice_sq_cd *cd)
800{
801 struct ice_aqc_nvm_pass_comp_tbl *cmd;
802 struct ice_aq_desc desc;
803 enum ice_status status;
804
805 if (!data || !comp_response || !comp_response_code)
806 return ICE_ERR_PARAM;
807
808 cmd = &desc.params.pass_comp_tbl;
809
810 ice_fill_dflt_direct_cmd_desc(&desc,
811 ice_aqc_opc_nvm_pass_component_tbl);
812 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
813
814 cmd->transfer_flag = transfer_flag;
815 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
816
817 if (!status) {
818 *comp_response = cmd->component_response;
819 *comp_response_code = cmd->component_response_code;
820 }
821 return status;
822}
823