1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27#include "i40e_prototype.h"
28
29
30
31
32
33
34
35
36
37
38
39i40e_status i40e_init_nvm(struct i40e_hw *hw)
40{
41 struct i40e_nvm_info *nvm = &hw->nvm;
42 i40e_status ret_code = 0;
43 u32 fla, gens;
44 u8 sr_size;
45
46
47
48
49 gens = rd32(hw, I40E_GLNVM_GENS);
50 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
51 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
52
53 nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
54
55
56 fla = rd32(hw, I40E_GLNVM_FLA);
57 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) {
58
59 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
60 nvm->blank_nvm_mode = false;
61 } else {
62 nvm->blank_nvm_mode = true;
63 ret_code = I40E_ERR_NVM_BLANK_MODE;
64 hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
65 }
66
67 return ret_code;
68}
69
70
71
72
73
74
75
76
77
78i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
79 enum i40e_aq_resource_access_type access)
80{
81 i40e_status ret_code = 0;
82 u64 gtime, timeout;
83 u64 time = 0;
84
85 if (hw->nvm.blank_nvm_mode)
86 goto i40e_i40e_acquire_nvm_exit;
87
88 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
89 0, &time, NULL);
90
91 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
92
93
94 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
95
96 if (ret_code) {
97
98 if (time > I40E_MAX_NVM_TIMEOUT)
99 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
100 + gtime;
101 else
102 timeout = hw->nvm.hw_semaphore_timeout;
103
104 while (gtime < timeout) {
105 usleep_range(10000, 20000);
106 ret_code = i40e_aq_request_resource(hw,
107 I40E_NVM_RESOURCE_ID,
108 access, 0, &time,
109 NULL);
110 if (!ret_code) {
111 hw->nvm.hw_semaphore_timeout =
112 I40E_MS_TO_GTIME(time) + gtime;
113 break;
114 }
115 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
116 }
117 if (ret_code) {
118 hw->nvm.hw_semaphore_timeout = 0;
119 hw->nvm.hw_semaphore_wait =
120 I40E_MS_TO_GTIME(time) + gtime;
121 hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n",
122 time);
123 }
124 }
125
126i40e_i40e_acquire_nvm_exit:
127 return ret_code;
128}
129
130
131
132
133
134
135
136void i40e_release_nvm(struct i40e_hw *hw)
137{
138 if (!hw->nvm.blank_nvm_mode)
139 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
140}
141
142
143
144
145
146
147
148static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
149{
150 i40e_status ret_code = I40E_ERR_TIMEOUT;
151 u32 srctl, wait_cnt;
152
153
154 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
155 srctl = rd32(hw, I40E_GLNVM_SRCTL);
156 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
157 ret_code = 0;
158 break;
159 }
160 udelay(5);
161 }
162 if (ret_code == I40E_ERR_TIMEOUT)
163 hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
164 return ret_code;
165}
166
167
168
169
170
171
172
173
174
/**
 * i40e_read_nvm_word - Reads one 16-bit word from the Shadow RAM
 * @hw: pointer to the HW structure
 * @offset: offset of the word to read, in 16-bit Shadow RAM words
 * @data: the word read from the Shadow RAM is returned here
 *
 * Reads a single word through the GLNVM_SRCTL register interface:
 * wait for DONE, program address + START, wait for DONE again, then
 * sample GLNVM_SRDATA.
 **/
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
			       u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	if (offset >= hw->nvm.sr_size) {
		hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n");
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first: a previous access may still be in flight */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Program the word address and kick off the read */
		sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 (1 << I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll until the hardware signals the read has completed */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			/* Extract the data field from the data register */
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code)
		hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
		       offset);

read_nvm_exit:
	return ret_code;
}
211
212
213
214
215
216
217
218
219
220
221
222
223i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
224 u16 *words, u16 *data)
225{
226 i40e_status ret_code = 0;
227 u16 index, word;
228
229
230 for (word = 0; word < *words; word++) {
231 index = offset + word;
232 ret_code = i40e_read_nvm_word(hw, index, &data[word]);
233 if (ret_code)
234 break;
235 }
236
237
238 *words = word;
239
240 return ret_code;
241}
242
243
244
245
246
247
248
249
250
251
252
253
254static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
255 u32 offset, u16 words, void *data,
256 bool last_command)
257{
258 i40e_status ret_code = I40E_ERR_NVM;
259
260
261
262
263
264
265 if ((offset + words) > hw->nvm.sr_size)
266 hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n");
267 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
268
269 hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n");
270 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
271 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
272
273 hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n");
274 else
275 ret_code = i40e_aq_update_nvm(hw, module_pointer,
276 2 * offset,
277 2 * words,
278 data, last_command, NULL);
279
280 return ret_code;
281}
282
283
284
285
286
287
288
289
290
291
292
/**
 * i40e_calc_nvm_checksum - Calculates the Shadow RAM SW checksum
 * @hw: pointer to the HW structure
 * @checksum: the calculated checksum is returned here
 *
 * Sums every Shadow RAM word except the SW checksum word itself and
 * the VPD and PCIe ALT auto-load modules (whose locations come from
 * pointer words in the Shadow RAM), then derives the checksum so the
 * full sum equals I40E_SR_SW_CHECKSUM_BASE.
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code = 0;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 word = 0;
	u32 i = 0;

	/* read pointer to VPD area */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
				      &pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Walk the whole Shadow RAM, skipping the checksum word and the
	 * two excluded modules.
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Skip the SW checksum word itself */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			i++;
		/* Skip the VPD module (max size given in bytes -> words) */
		if (i == (u32)vpd_module) {
			i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
			if (i >= hw->nvm.sr_size)
				break;
		}
		/* Skip the PCIe ALT module (max size in bytes -> words) */
		if (i == (u32)pcie_alt_module) {
			i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
			if (i >= hw->nvm.sr_size)
				break;
		}

		ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
		if (ret_code) {
			ret_code = I40E_ERR_NVM_CHECKSUM;
			goto i40e_calc_nvm_checksum_exit;
		}
		/* u16 arithmetic wraps mod 2^16 by design */
		checksum_local += word;
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	return ret_code;
}
351
352
353
354
355
356
357
358
359
360i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
361{
362 i40e_status ret_code = 0;
363 u16 checksum;
364
365 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
366 if (!ret_code)
367 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
368 1, &checksum, true);
369
370 return ret_code;
371}
372
373
374
375
376
377
378
379
380
/**
 * i40e_validate_nvm_checksum - Validates the NVM SW checksum
 * @hw: pointer to the HW structure
 * @checksum: optional out pointer for the calculated checksum (may be NULL)
 *
 * Recomputes the checksum over the Shadow RAM and compares it with the
 * stored checksum word.  Returns I40E_ERR_NVM_CHECKSUM on mismatch.
 **/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
				       u16 *checksum)
{
	i40e_status ret_code = 0;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	if (ret_code)
		goto i40e_validate_nvm_checksum_exit;

	/* Fetch the checksum word stored in the Shadow RAM.
	 * NOTE(review): the return value is deliberately not checked;
	 * on a failed read checksum_sr keeps its zero initializer, so
	 * the comparison below will normally report a mismatch anyway.
	 */
	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);

	/* The stored and the calculated checksums must agree */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the caller wants it, hand back the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

i40e_validate_nvm_checksum_exit:
	return ret_code;
}
410
/* Forward declarations for the NVM update state machine: one handler
 * per state (init/reading/writing), a command validator, and the three
 * low-level AQ operations they dispatch to.
 */
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *errno);
static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 int *errno);
static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
					 struct i40e_nvm_access *cmd,
					 u8 *bytes, int *errno);
static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
					struct i40e_nvm_access *cmd,
					u8 *bytes, int *errno);
432static inline u8 i40e_nvmupd_get_module(u32 val)
433{
434 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
435}
436static inline u8 i40e_nvmupd_get_transaction(u32 val)
437{
438 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
439}
440
441
442
443
444
445
446
447
448
449
450i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
451 struct i40e_nvm_access *cmd,
452 u8 *bytes, int *errno)
453{
454 i40e_status status;
455
456
457 *errno = 0;
458
459 switch (hw->nvmupd_state) {
460 case I40E_NVMUPD_STATE_INIT:
461 status = i40e_nvmupd_state_init(hw, cmd, bytes, errno);
462 break;
463
464 case I40E_NVMUPD_STATE_READING:
465 status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno);
466 break;
467
468 case I40E_NVMUPD_STATE_WRITING:
469 status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno);
470 break;
471
472 default:
473
474 status = I40E_NOT_SUPPORTED;
475 *errno = -ESRCH;
476 break;
477 }
478 return status;
479}
480
481
482
483
484
485
486
487
488
489
490
/**
 * i40e_nvmupd_state_init - Handle NVM update requests in the Init state
 * @hw: pointer to the HW structure
 * @cmd: pointer to the nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to the returned error code
 *
 * Processes the commands that are legal from Init and conditionally
 * moves the state machine to READING or WRITING.  For write-type
 * commands the NVM lock is released immediately on failure, otherwise
 * held until the AQ completion arrives (nvm_release_on_done).
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *errno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* Standalone read: acquire, read, release in one step */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* First chunk of a multi-part read: keep the NVM lock
		 * and enter the READING state
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
			hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* Standalone erase; on success the lock is released by
		 * the AQ completion handler
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->aq.nvm_release_on_done = true;
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* Standalone write; lock handling as for the erase above */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->aq.nvm_release_on_done = true;
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* First chunk of a multi-part write: keep the NVM lock
		 * and enter the WRITING state
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* Standalone checksum update; -EIO covers failures that
		 * left no AQ error code behind
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*errno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->aq.nvm_release_on_done = true;
			}
		}
		break;

	default:
		/* Command not valid from the Init state */
		status = I40E_ERR_NVM;
		*errno = -ESRCH;
		break;
	}
	return status;
}
581
582
583
584
585
586
587
588
589
590
591
592static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
593 struct i40e_nvm_access *cmd,
594 u8 *bytes, int *errno)
595{
596 i40e_status status;
597 enum i40e_nvmupd_cmd upd_cmd;
598
599 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
600
601 switch (upd_cmd) {
602 case I40E_NVMUPD_READ_SA:
603 case I40E_NVMUPD_READ_CON:
604 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
605 break;
606
607 case I40E_NVMUPD_READ_LCB:
608 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
609 i40e_release_nvm(hw);
610 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
611 break;
612
613 default:
614 status = I40E_NOT_SUPPORTED;
615 *errno = -ESRCH;
616 break;
617 }
618 return status;
619}
620
621
622
623
624
625
626
627
628
629
630
/**
 * i40e_nvmupd_state_writing - Handle requests while in the Writing state
 * @hw: pointer to the HW structure
 * @cmd: pointer to the nvm update command buffer
 * @bytes: pointer to the data buffer
 * @errno: pointer to the returned error code
 *
 * Continues a multi-part write or checksum update; on the last chunk
 * (LCB variants) arranges for the NVM lock to be released by the AQ
 * completion handler and returns the state machine to Init.
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *errno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);

	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* Middle chunk: write and stay in the Writing state */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
		break;

	case I40E_NVMUPD_WRITE_LCB:
		/* Last chunk: hand lock release to the AQ completion */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
		if (!status) {
			hw->aq.nvm_release_on_done = true;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Checksum update mid-sequence; -EIO covers failures
		 * that left no AQ error code behind
		 */
		status = i40e_update_nvm_checksum(hw);
		if (status)
			*errno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
				   -EIO;
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Checksum as the final operation of the update */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*errno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
				   -EIO;
		} else {
			hw->aq.nvm_release_on_done = true;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		}
		break;

	default:
		status = I40E_NOT_SUPPORTED;
		*errno = -ESRCH;
		break;
	}
	return status;
}
680
681
682
683
684
685
686
687
688
/**
 * i40e_nvmupd_validate_command - Validate a given nvm update command
 * @hw: pointer to the HW structure
 * @cmd: pointer to the nvm update command buffer
 * @errno: pointer to the returned error code
 *
 * Maps the command/transaction fields of @cmd->config onto one of the
 * I40E_NVMUPD_* command types.  Returns I40E_NVMUPD_INVALID (and sets
 * *errno to -EFAULT) for unknown combinations or out-of-range sizes.
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *errno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 transaction, module;

	/* anything that doesn't match a recognized case is invalid */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);

	/* limits on data size */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		hw_dbg(hw, "i40e_nvmupd_validate_command data_size %d\n",
		       cmd->data_size);
		*errno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		/* checksum flags may combine with SA or LCB */
		case (I40E_NVM_CSUM|I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		}
		break;
	}

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*errno = -EFAULT;
		hw_dbg(hw,
		       "i40e_nvmupd_validate_command returns %d errno: %d\n",
		       upd_cmd, *errno);
	}
	return upd_cmd;
}
767
768
769
770
771
772
773
774
775
776
777static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
778 struct i40e_nvm_access *cmd,
779 u8 *bytes, int *errno)
780{
781 i40e_status status;
782 u8 module, transaction;
783 bool last;
784
785 transaction = i40e_nvmupd_get_transaction(cmd->config);
786 module = i40e_nvmupd_get_module(cmd->config);
787 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
788 hw_dbg(hw, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
789 module, cmd->offset, cmd->data_size);
790
791 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
792 bytes, last, NULL);
793 hw_dbg(hw, "i40e_nvmupd_nvm_read status %d\n", status);
794 if (status)
795 *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
796
797 return status;
798}
799
800
801
802
803
804
805
806
807
808static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
809 struct i40e_nvm_access *cmd,
810 int *errno)
811{
812 i40e_status status = 0;
813 u8 module, transaction;
814 bool last;
815
816 transaction = i40e_nvmupd_get_transaction(cmd->config);
817 module = i40e_nvmupd_get_module(cmd->config);
818 last = (transaction & I40E_NVM_LCB);
819 hw_dbg(hw, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
820 module, cmd->offset, cmd->data_size);
821 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
822 last, NULL);
823 hw_dbg(hw, "i40e_nvmupd_nvm_erase status %d\n", status);
824 if (status)
825 *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
826
827 return status;
828}
829
830
831
832
833
834
835
836
837
838
839static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
840 struct i40e_nvm_access *cmd,
841 u8 *bytes, int *errno)
842{
843 i40e_status status = 0;
844 u8 module, transaction;
845 bool last;
846
847 transaction = i40e_nvmupd_get_transaction(cmd->config);
848 module = i40e_nvmupd_get_module(cmd->config);
849 last = (transaction & I40E_NVM_LCB);
850 hw_dbg(hw, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
851 module, cmd->offset, cmd->data_size);
852 status = i40e_aq_update_nvm(hw, module, cmd->offset,
853 (u16)cmd->data_size, bytes, last, NULL);
854 hw_dbg(hw, "i40e_nvmupd_nvm_write status %d\n", status);
855 if (status)
856 *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
857
858 return status;
859}
860