1
2
3
4#include "i40e_prototype.h"
5
6
7
8
9
10
11
12
13
14
15
16i40e_status i40e_init_nvm(struct i40e_hw *hw)
17{
18 struct i40e_nvm_info *nvm = &hw->nvm;
19 i40e_status ret_code = 0;
20 u32 fla, gens;
21 u8 sr_size;
22
23
24
25
26 gens = rd32(hw, I40E_GLNVM_GENS);
27 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
28 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
29
30 nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
31
32
33 fla = rd32(hw, I40E_GLNVM_FLA);
34 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) {
35
36 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
37 nvm->blank_nvm_mode = false;
38 } else {
39 nvm->blank_nvm_mode = true;
40 ret_code = I40E_ERR_NVM_BLANK_MODE;
41 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
42 }
43
44 return ret_code;
45}
46
47
48
49
50
51
52
53
54
55i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
56 enum i40e_aq_resource_access_type access)
57{
58 i40e_status ret_code = 0;
59 u64 gtime, timeout;
60 u64 time_left = 0;
61
62 if (hw->nvm.blank_nvm_mode)
63 goto i40e_i40e_acquire_nvm_exit;
64
65 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
66 0, &time_left, NULL);
67
68 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
69
70
71 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
72
73 if (ret_code)
74 i40e_debug(hw, I40E_DEBUG_NVM,
75 "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
76 access, time_left, ret_code, hw->aq.asq_last_status);
77
78 if (ret_code && time_left) {
79
80 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
81 while ((gtime < timeout) && time_left) {
82 usleep_range(10000, 20000);
83 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
84 ret_code = i40e_aq_request_resource(hw,
85 I40E_NVM_RESOURCE_ID,
86 access, 0, &time_left,
87 NULL);
88 if (!ret_code) {
89 hw->nvm.hw_semaphore_timeout =
90 I40E_MS_TO_GTIME(time_left) + gtime;
91 break;
92 }
93 }
94 if (ret_code) {
95 hw->nvm.hw_semaphore_timeout = 0;
96 i40e_debug(hw, I40E_DEBUG_NVM,
97 "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
98 time_left, ret_code, hw->aq.asq_last_status);
99 }
100 }
101
102i40e_i40e_acquire_nvm_exit:
103 return ret_code;
104}
105
106
107
108
109
110
111
112void i40e_release_nvm(struct i40e_hw *hw)
113{
114 i40e_status ret_code = I40E_SUCCESS;
115 u32 total_delay = 0;
116
117 if (hw->nvm.blank_nvm_mode)
118 return;
119
120 ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
121
122
123
124
125 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
126 (total_delay < hw->aq.asq_cmd_timeout)) {
127 usleep_range(1000, 2000);
128 ret_code = i40e_aq_release_resource(hw,
129 I40E_NVM_RESOURCE_ID,
130 0, NULL);
131 total_delay++;
132 }
133}
134
135
136
137
138
139
140
141static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
142{
143 i40e_status ret_code = I40E_ERR_TIMEOUT;
144 u32 srctl, wait_cnt;
145
146
147 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
148 srctl = rd32(hw, I40E_GLNVM_SRCTL);
149 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
150 ret_code = 0;
151 break;
152 }
153 udelay(5);
154 }
155 if (ret_code == I40E_ERR_TIMEOUT)
156 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
157 return ret_code;
158}
159
160
161
162
163
164
165
166
167
/**
 * i40e_read_nvm_word_srctl - Read one Shadow RAM word via GLNVM_SRCTL
 * @hw: pointer to the HW structure
 * @offset: offset of the word to read, in words
 * @data: word read from the Shadow RAM
 *
 * Reads a single 16-bit word using the register-based (SRCTL/SRDATA)
 * interface: wait for the controller to be idle, program the address and
 * START bit, wait for DONE, then read the data register.
 **/
static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	i40e_status ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	/* Reject offsets beyond the Shadow RAM size learned at init */
	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first: the controller must be idle */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (!ret_code) {
		/* Write the address and start the read */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll until the read completes, then fetch the data */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (!ret_code) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
		}
	}
	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}
207
208
209
210
211
212
213
214
215
216
217
218
219static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
220 u8 module_pointer, u32 offset,
221 u16 words, void *data,
222 bool last_command)
223{
224 i40e_status ret_code = I40E_ERR_NVM;
225 struct i40e_asq_cmd_details cmd_details;
226
227 memset(&cmd_details, 0, sizeof(cmd_details));
228 cmd_details.wb_desc = &hw->nvm_wb_desc;
229
230
231
232
233
234
235 if ((offset + words) > hw->nvm.sr_size)
236 i40e_debug(hw, I40E_DEBUG_NVM,
237 "NVM write error: offset %d beyond Shadow RAM limit %d\n",
238 (offset + words), hw->nvm.sr_size);
239 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
240
241 i40e_debug(hw, I40E_DEBUG_NVM,
242 "NVM write fail error: tried to write %d words, limit is %d.\n",
243 words, I40E_SR_SECTOR_SIZE_IN_WORDS);
244 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
245 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
246
247 i40e_debug(hw, I40E_DEBUG_NVM,
248 "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
249 offset, words);
250 else
251 ret_code = i40e_aq_read_nvm(hw, module_pointer,
252 2 * offset,
253 2 * words,
254 data, last_command, &cmd_details);
255
256 return ret_code;
257}
258
259
260
261
262
263
264
265
266
267static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
268 u16 *data)
269{
270 i40e_status ret_code = I40E_ERR_TIMEOUT;
271
272 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
273 *data = le16_to_cpu(*(__le16 *)data);
274
275 return ret_code;
276}
277
278
279
280
281
282
283
284
285
286
287
288
289static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
290 u16 offset, u16 *data)
291{
292 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
293 return i40e_read_nvm_word_aq(hw, offset, data);
294
295 return i40e_read_nvm_word_srctl(hw, offset, data);
296}
297
298
299
300
301
302
303
304
305
306i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
307 u16 *data)
308{
309 i40e_status ret_code = 0;
310
311 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
312 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
313 if (ret_code)
314 return ret_code;
315
316 ret_code = __i40e_read_nvm_word(hw, offset, data);
317
318 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
319 i40e_release_nvm(hw);
320
321 return ret_code;
322}
323
324
325
326
327
328
329
330
331
332
333
334
335static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
336 u16 *words, u16 *data)
337{
338 i40e_status ret_code = 0;
339 u16 index, word;
340
341
342 for (word = 0; word < *words; word++) {
343 index = offset + word;
344 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
345 if (ret_code)
346 break;
347 }
348
349
350 *words = word;
351
352 return ret_code;
353}
354
355
356
357
358
359
360
361
362
363
364
365
/**
 * i40e_read_nvm_buffer_aq - Read multiple Shadow RAM words via the AdminQ
 * @hw: pointer to the HW structure
 * @offset: offset of the first word, in words
 * @words: in: number of words to read; out: number actually read
 * @data: buffer to receive the words
 *
 * Splits the request into chunks so that no single AQ read exceeds one
 * sector or crosses a sector boundary (i40e_read_nvm_aq rejects both),
 * then converts the whole result from little-endian to host order.
 **/
static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	i40e_status ret_code;
	u16 read_size;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	do {
		/* If the current offset is not sector-aligned (only possible
		 * on the first iteration), read just up to the next sector
		 * boundary; otherwise read up to a full sector of what
		 * remains.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);

		/* Flag the final chunk so firmware knows the sequence ends */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code)
			goto read_nvm_buffer_aq_exit;

		/* Advance past the chunk just read */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	/* Convert the buffer from NVM (little endian) to host byte order */
	for (i = 0; i < *words; i++)
		data[i] = le16_to_cpu(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	/* Report the number of words actually read, even on error */
	*words = words_read;
	return ret_code;
}
411
412
413
414
415
416
417
418
419
420
421
422static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
423 u16 offset, u16 *words,
424 u16 *data)
425{
426 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
427 return i40e_read_nvm_buffer_aq(hw, offset, words, data);
428
429 return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
430}
431
432
433
434
435
436
437
438
439
440
441
442
/**
 * i40e_write_nvm_aq - Write Shadow RAM words via the AdminQ
 * @hw: pointer to the HW structure
 * @module_pointer: module pointer location in words from the SR beginning
 * @offset: offset in words from the module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ this is the last command in the sequence
 *
 * Writes a buffer of 16-bit words to the Shadow RAM using the AdminQ.
 * A single request may not exceed one sector or cross a sector boundary.
 **/
static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				     u32 offset, u16 words, void *data,
				     bool last_command)
{
	i40e_status ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* The SR limit check only applies to the flat memory model; for the
	 * module-based model the module pointer value is unknown here, so
	 * firmware performs the check instead.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to one sector in a single AQ command */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write may not cross a sector boundary */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
			   offset, words);
	else
		/* The AQ interface works in bytes, the SR in words */
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset,	/* bytes */
					      2 * words,	/* bytes */
					      data, last_command, 0,
					      &cmd_details);

	return ret_code;
}
482
483
484
485
486
487
488
489
490
491
492
/**
 * i40e_calc_nvm_checksum - Calculate the software checksum of the Shadow RAM
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum result
 *
 * Sums every Shadow RAM word except the checksum word itself, the VPD
 * module and the PCIe ALT auto-load module, then subtracts the sum from
 * I40E_SR_SW_CHECKSUM_BASE. Reads the SR one sector at a time into a
 * scratch buffer. Caller must hold the NVM semaphore (uses the
 * __i40e_read_* lock-held helpers).
 **/
static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
					  u16 *checksum)
{
	i40e_status ret_code;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	/* Scratch buffer big enough for one sector of words */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* Read the pointer to the VPD module (excluded from the checksum) */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Read the pointer to the PCIe ALT auto-load module (also excluded) */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Walk the whole Shadow RAM, refilling the scratch buffer at each
	 * sector boundary, and accumulate the checksum.
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read the next SR sector when crossing a sector boundary */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip the checksum word itself */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip the VPD module; MAX_SIZE is in bytes, /2 gives words */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip the PCIe ALT module; size also converted to words */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}
565
566
567
568
569
570
571
572
573
574i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
575{
576 i40e_status ret_code;
577 u16 checksum;
578 __le16 le_sum;
579
580 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
581 le_sum = cpu_to_le16(checksum);
582 if (!ret_code)
583 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
584 1, &le_sum, true);
585
586 return ret_code;
587}
588
589
590
591
592
593
594
595
596
/**
 * i40e_validate_nvm_checksum - Validate the stored EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: optional out parameter for the calculated checksum (may be NULL)
 *
 * Recomputes the software checksum and compares it against the checksum
 * word stored in the Shadow RAM. Returns I40E_ERR_NVM_CHECKSUM on
 * mismatch.
 **/
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
				       u16 *checksum)
{
	i40e_status ret_code = 0;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	/* Hold the NVM semaphore across both the recomputation and the
	 * stored-checksum read so both come from a consistent NVM image.
	 */
	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;
	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	/* Best-effort read of the stored checksum: a failure leaves
	 * checksum_sr at 0, which is then reported as a mismatch below.
	 */
	__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
	i40e_release_nvm(hw);
	if (ret_code)
		return ret_code;

	/* The stored and the calculated checksum must match */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the caller cares, hand back the calculated value */
	if (checksum)
		*checksum = checksum_local;

	return ret_code;
}
630
631static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
632 struct i40e_nvm_access *cmd,
633 u8 *bytes, int *perrno);
634static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
635 struct i40e_nvm_access *cmd,
636 u8 *bytes, int *perrno);
637static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
638 struct i40e_nvm_access *cmd,
639 u8 *bytes, int *errno);
640static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
641 struct i40e_nvm_access *cmd,
642 int *perrno);
643static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
644 struct i40e_nvm_access *cmd,
645 int *perrno);
646static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
647 struct i40e_nvm_access *cmd,
648 u8 *bytes, int *perrno);
649static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
650 struct i40e_nvm_access *cmd,
651 u8 *bytes, int *perrno);
652static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
653 struct i40e_nvm_access *cmd,
654 u8 *bytes, int *perrno);
655static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
656 struct i40e_nvm_access *cmd,
657 u8 *bytes, int *perrno);
658static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
659 struct i40e_nvm_access *cmd,
660 u8 *bytes, int *perrno);
/* Extract the module-pointer field from an NVM update config word */
static inline u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
/* Extract the transaction-type field from an NVM update config word */
static inline u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
669
/* Extract the preservation-flags field from an NVM update config word */
static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
	return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
		I40E_NVM_PRESERVATION_FLAGS_SHIFT);
}
675
/* Human-readable names for debug logging; indexed directly by
 * enum i40e_nvmupd_cmd, so the order here must stay in sync with it.
 */
static const char * const i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
	"I40E_NVMUPD_GET_AQ_EVENT",
};
695
696
697
698
699
700
701
702
703
704
/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Entry point for NVM update requests. Dispatches to the handler for the
 * current hw->nvmupd_state. Returns an AQ-style status; a POSIX error
 * code is reported back through @perrno.
 **/
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
				struct i40e_nvm_access *cmd,
				u8 *bytes, int *perrno)
{
	i40e_status status;
	enum i40e_nvmupd_cmd upd_cmd;

	/* assume success until proven otherwise */
	*perrno = 0;

	/* early decode for the status short-circuit and debug logging */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* A status request is answered immediately, without entering the
	 * state machine: byte 0 carries the state, bytes 2-3 the opcode
	 * being waited on (if the buffer is large enough).
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		bytes[0] = hw->nvmupd_state;

		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
		}

		/* Reading the status clears a latched error state */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

		return 0;
	}

	/* Any other command also clears a latched error, with a log note */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	}

	/* Serialize with the ARQ handler: the AQ completion path (which
	 * calls i40e_nvmupd_check_wait_event/clear_wait_state) must not run
	 * between issuing a command below and updating the wait-state
	 * bookkeeping (nvm_wait_opcode, nvm_release_on_done).
	 */
	mutex_lock(&hw->aq.arq_mutex);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* offset 0xffff is the "stop waiting" escape hatch: clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_clear_wait_state(hw);
			status = 0;
			break;
		}

		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	mutex_unlock(&hw->aq.arq_mutex);
	return status;
}
810
811
812
813
814
815
816
817
818
819
820
/**
 * i40e_nvmupd_state_init - Handle NVM update commands in the Init state
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Processes the commands legal in the Init state and conditionally moves
 * the state machine forward. The NVM semaphore ownership rules per case:
 * one-shot (SA) operations release immediately or on AQ completion;
 * session starts (SNT) keep the semaphore and transition to the
 * READING/WRITE_WAIT states.
 **/
static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		/* one-shot read: take the semaphore, read, release */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		/* read-session start: keep the semaphore and enter the
		 * READING state on success
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		/* erase: semaphore is released later, when the AQ
		 * completion for the erase opcode arrives
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		/* one-shot write: release on the AQ update completion */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		/* write-session start: keep the semaphore and wait for
		 * the update completion before accepting further writes
		 */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		/* standalone checksum update */
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						     hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				/* fall back to -EIO when there is no AQ
				 * error to translate
				 */
				*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_EVENT:
		status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -ESRCH;
		break;
	}
	return status;
}
949
950
951
952
953
954
955
956
957
958
959
960static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
961 struct i40e_nvm_access *cmd,
962 u8 *bytes, int *perrno)
963{
964 i40e_status status = 0;
965 enum i40e_nvmupd_cmd upd_cmd;
966
967 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
968
969 switch (upd_cmd) {
970 case I40E_NVMUPD_READ_SA:
971 case I40E_NVMUPD_READ_CON:
972 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
973 break;
974
975 case I40E_NVMUPD_READ_LCB:
976 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
977 i40e_release_nvm(hw);
978 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
979 break;
980
981 default:
982 i40e_debug(hw, I40E_DEBUG_NVM,
983 "NVMUPD: bad cmd %s in reading state.\n",
984 i40e_nvm_update_state_str[upd_cmd]);
985 status = I40E_NOT_SUPPORTED;
986 *perrno = -ESRCH;
987 break;
988 }
989 return status;
990}
991
992
993
994
995
996
997
998
999
1000
1001
/**
 * i40e_nvmupd_state_writing - Handle commands in the Writing state
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Continues (CON) or terminates (LCB) an open write session, including
 * the checksum variants. If a command fails with EBUSY because our NVM
 * semaphore hold time has expired, the semaphore is released, reacquired
 * and the command retried once.
 **/
static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	i40e_status status = 0;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		/* continue the session; wait for the update completion */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		/* last command: semaphore released on AQ completion */
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* checksum update within the session; keep the semaphore */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* checksum update that ends the session */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				   i40e_aq_rc_to_posix(status,
						       hw->aq.asq_last_status) :
				   -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}

	/* An EBUSY from firmware can mean our semaphore hold time expired
	 * and another owner took it. If GTIME confirms the expiry, release
	 * and reacquire the write semaphore, then retry the command exactly
	 * once; on reacquire failure, restore the original error.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		i40e_status old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}
1111
1112
1113
1114
1115
1116void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1117{
1118 i40e_debug(hw, I40E_DEBUG_NVM,
1119 "NVMUPD: clearing wait on opcode 0x%04x\n",
1120 hw->nvm_wait_opcode);
1121
1122 if (hw->nvm_release_on_done) {
1123 i40e_release_nvm(hw);
1124 hw->nvm_release_on_done = false;
1125 }
1126 hw->nvm_wait_opcode = 0;
1127
1128 if (hw->aq.arq_last_status) {
1129 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1130 return;
1131 }
1132
1133 switch (hw->nvmupd_state) {
1134 case I40E_NVMUPD_STATE_INIT_WAIT:
1135 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1136 break;
1137
1138 case I40E_NVMUPD_STATE_WRITE_WAIT:
1139 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1140 break;
1141
1142 default:
1143 break;
1144 }
1145}
1146
1147
1148
1149
1150
1151
1152
1153void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1154 struct i40e_aq_desc *desc)
1155{
1156 u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1157
1158 if (opcode == hw->nvm_wait_opcode) {
1159 memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
1160 i40e_nvmupd_clear_wait_state(hw);
1161 }
1162}
1163
1164
1165
1166
1167
1168
1169
1170
1171
/**
 * i40e_nvmupd_validate_command - Decode an NVM update request
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * Maps the raw command/config pair to an enum i40e_nvmupd_cmd value.
 * Returns I40E_NVMUPD_INVALID (and sets @perrno) for out-of-range data
 * sizes or unrecognized command/transaction combinations.
 **/
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						struct i40e_nvm_access *cmd,
						int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 module, transaction;

	/* anything that doesn't match a recognized case is invalid */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);

	/* limits checked for both read and write */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command data_size %d\n",
			   cmd->data_size);
		*perrno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		case I40E_NVM_EXEC:
			/* the module field selects the EXEC sub-operation */
			if (module == 0xf)
				upd_cmd = I40E_NVMUPD_STATUS;
			else if (module == 0)
				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
			break;
		case I40E_NVM_AQE:
			upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		case I40E_NVM_EXEC:
			if (module == 0)
				upd_cmd = I40E_NVMUPD_EXEC_AQ;
			break;
		}
		break;
	}

	return upd_cmd;
}
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
/**
 * i40e_nvmupd_exec_aq - Run a raw AdminQ command supplied by the caller
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer: an AQ descriptor followed by its data
 * @perrno: pointer to return error code
 *
 * Sends the caller-provided descriptor (and optional data buffer) on the
 * ASQ. A non-zero cmd->offset is treated as an AQ opcode to wait for;
 * cmd->offset == 0xffff is a no-op used to cancel a pending wait.
 **/
static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
				       struct i40e_nvm_access *cmd,
				       u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	i40e_status status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	/* 0xffff means "cancel the wait"; nothing to execute */
	if (cmd->offset == 0xffff)
		return 0;

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* The user buffer must at least contain a full descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* Size the indirect buffer from whichever is larger: the data the
	 * user actually supplied or the length claimed in the descriptor.
	 */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
	if (buff_size) {
		/* Lazily allocate the reusable exec buffer */
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
		}
	}

	/* A pending wait gets a fresh event descriptor */
	if (cmd->offset)
		memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
		return status;
	}

	/* A non-zero offset is the opcode of an AQ event to wait for */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
/**
 * i40e_nvmupd_get_aq_result - Return the results of a previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the output data buffer
 * @perrno: pointer to return error code
 *
 * Copies a window (cmd->offset, cmd->data_size) out of the concatenation
 * of the write-back descriptor (hw->nvm_wb_desc) and its indirect data
 * buffer (hw->nvm_buff). The copy length is trimmed to what is available.
 **/
static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
					     struct i40e_nvm_access *cmd,
					     u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);

	/* The window must start within the available result */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* Trim the request to what is actually available */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		/* Window starts inside the descriptor: copy the descriptor
		 * portion first, then fall through to the data buffer.
		 */
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		/* Window lies entirely within the indirect data buffer */
		buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return 0;
}
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1422 struct i40e_nvm_access *cmd,
1423 u8 *bytes, int *perrno)
1424{
1425 u32 aq_total_len;
1426 u32 aq_desc_len;
1427
1428 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1429
1430 aq_desc_len = sizeof(struct i40e_aq_desc);
1431 aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
1432
1433
1434 if (cmd->data_size > aq_total_len) {
1435 i40e_debug(hw, I40E_DEBUG_NVM,
1436 "%s: copy length %d too big, trimming to %d\n",
1437 __func__, cmd->data_size, aq_total_len);
1438 cmd->data_size = aq_total_len;
1439 }
1440
1441 memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
1442
1443 return 0;
1444}
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1456 struct i40e_nvm_access *cmd,
1457 u8 *bytes, int *perrno)
1458{
1459 struct i40e_asq_cmd_details cmd_details;
1460 i40e_status status;
1461 u8 module, transaction;
1462 bool last;
1463
1464 transaction = i40e_nvmupd_get_transaction(cmd->config);
1465 module = i40e_nvmupd_get_module(cmd->config);
1466 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1467
1468 memset(&cmd_details, 0, sizeof(cmd_details));
1469 cmd_details.wb_desc = &hw->nvm_wb_desc;
1470
1471 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1472 bytes, last, &cmd_details);
1473 if (status) {
1474 i40e_debug(hw, I40E_DEBUG_NVM,
1475 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1476 module, cmd->offset, cmd->data_size);
1477 i40e_debug(hw, I40E_DEBUG_NVM,
1478 "i40e_nvmupd_nvm_read status %d aq %d\n",
1479 status, hw->aq.asq_last_status);
1480 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1481 }
1482
1483 return status;
1484}
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1495 struct i40e_nvm_access *cmd,
1496 int *perrno)
1497{
1498 i40e_status status = 0;
1499 struct i40e_asq_cmd_details cmd_details;
1500 u8 module, transaction;
1501 bool last;
1502
1503 transaction = i40e_nvmupd_get_transaction(cmd->config);
1504 module = i40e_nvmupd_get_module(cmd->config);
1505 last = (transaction & I40E_NVM_LCB);
1506
1507 memset(&cmd_details, 0, sizeof(cmd_details));
1508 cmd_details.wb_desc = &hw->nvm_wb_desc;
1509
1510 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1511 last, &cmd_details);
1512 if (status) {
1513 i40e_debug(hw, I40E_DEBUG_NVM,
1514 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1515 module, cmd->offset, cmd->data_size);
1516 i40e_debug(hw, I40E_DEBUG_NVM,
1517 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1518 status, hw->aq.asq_last_status);
1519 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1520 }
1521
1522 return status;
1523}
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1535 struct i40e_nvm_access *cmd,
1536 u8 *bytes, int *perrno)
1537{
1538 i40e_status status = 0;
1539 struct i40e_asq_cmd_details cmd_details;
1540 u8 module, transaction;
1541 u8 preservation_flags;
1542 bool last;
1543
1544 transaction = i40e_nvmupd_get_transaction(cmd->config);
1545 module = i40e_nvmupd_get_module(cmd->config);
1546 last = (transaction & I40E_NVM_LCB);
1547 preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1548
1549 memset(&cmd_details, 0, sizeof(cmd_details));
1550 cmd_details.wb_desc = &hw->nvm_wb_desc;
1551
1552 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1553 (u16)cmd->data_size, bytes, last,
1554 preservation_flags, &cmd_details);
1555 if (status) {
1556 i40e_debug(hw, I40E_DEBUG_NVM,
1557 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1558 module, cmd->offset, cmd->data_size);
1559 i40e_debug(hw, I40E_DEBUG_NVM,
1560 "i40e_nvmupd_nvm_write status %d aq %d\n",
1561 status, hw->aq.asq_last_status);
1562 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1563 }
1564
1565 return status;
1566}
1567