// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "../include/common/hl_boot_if.h"

#include <linux/firmware.h>
#include <linux/genalloc.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>

#define FW_FILE_MAX_SIZE	0x1400000

/**
 * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
 *
 * @hdev: pointer to hl_device structure.
 * @fw_name: the firmware image name
 * @dst: IO memory mapped address space to copy firmware to
 *
 * Copy fw code from firmware file to device memory.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
				void __iomem *dst)
{
	const struct firmware *fw;
	const u64 *fw_data;
	size_t fw_size;
	int rc;

	rc = request_firmware(&fw, fw_name, hdev->dev);
	if (rc) {
		dev_err(hdev->dev, "Firmware file %s is not found!\n", fw_name);
		goto out;
	}

	fw_size = fw->size;
	if ((fw_size % 4) != 0) {
		dev_err(hdev->dev, "Illegal %s firmware size %zu\n",
			fw_name, fw_size);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);

	if (fw_size > FW_FILE_MAX_SIZE) {
		dev_err(hdev->dev,
			"FW file size %zu exceeds maximum of %u bytes\n",
			fw_size, FW_FILE_MAX_SIZE);
		rc = -EINVAL;
		goto out;
	}

	fw_data = (const u64 *) fw->data;

	memcpy_toio(dst, fw_data, fw_size);

out:
	release_firmware(fw);
	return rc;
}

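/**
 * hl_fw_send_pci_access_msg() - Notify the device CPU about host PCI access.
 *
 * @hdev: pointer to hl_device structure.
 * @opcode: CPU-CP packet opcode to send.
 *
 * Builds a CPU-CP packet from the given opcode and sends it to the device
 * CPU without reading back a result.
 */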
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
{
	struct cpucp_packet pkt = {};

	pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);

	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
						sizeof(pkt), 0, NULL);
}

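/**
 * hl_fw_send_cpu_message() - Send a CPU-CP packet to the device CPU.
 *
 * @hdev: pointer to hl_device structure.
 * @hw_queue_id: queue ID to send the packet on.
 * @msg: packet to send.
 * @len: packet length in bytes.
 * @timeout: fence polling timeout in usec.
 * @result: optional output for the value returned by the firmware.
 *
 * Copies the packet to CPU-accessible DMA memory, sends it on the given
 * queue, waits for the fence to be signaled and checks the firmware
 * return code.
 */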
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
				u16 len, u32 timeout, long *result)
{
	struct cpucp_packet *pkt;
	dma_addr_t pkt_dma_addr;
	u32 tmp;
	int rc = 0;

	pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
								&pkt_dma_addr);
	if (!pkt) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for packet to CPU\n");
		return -ENOMEM;
	}

	memcpy(pkt, msg, len);

	mutex_lock(&hdev->send_cpu_message_lock);

	if (hdev->disabled)
		goto out;

	if (hdev->device_cpu_disabled) {
		rc = -EIO;
		goto out;
	}

	rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
	if (rc) {
		dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
		goto out;
	}

	rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
				(tmp == CPUCP_PACKET_FENCE_VAL), 1000,
				timeout, true);

	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);

	if (rc == -ETIMEDOUT) {
		dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
		hdev->device_cpu_disabled = true;
		goto out;
	}

	tmp = le32_to_cpu(pkt->ctl);

	rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
	if (rc) {
		dev_err(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
			rc,
			(tmp & CPUCP_PKT_CTL_OPCODE_MASK)
					>> CPUCP_PKT_CTL_OPCODE_SHIFT);
		rc = -EIO;
	} else if (result) {
		*result = (long) le64_to_cpu(pkt->result);
	}

out:
	mutex_unlock(&hdev->send_cpu_message_lock);

	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);

	return rc;
}

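/**
 * hl_fw_unmask_irq() - Ask the firmware to unmask a single RAZWI IRQ.
 *
 * @hdev: pointer to hl_device structure.
 * @event_type: event (IRQ) number to unmask.
 */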
int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
{
	struct cpucp_packet pkt;
	long result;
	int rc;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.value = cpu_to_le64(event_type);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, &result);

	if (rc)
		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d\n",
			event_type);

	return rc;
}

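/**
 * hl_fw_unmask_irq_arr() - Ask the firmware to unmask an array of IRQs.
 *
 * @hdev: pointer to hl_device structure.
 * @irq_arr: array of event (IRQ) numbers to unmask.
 * @irq_arr_size: size of the array in bytes.
 */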
int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
			size_t irq_arr_size)
{
	struct cpucp_unmask_irq_arr_packet *pkt;
	size_t total_pkt_size;
	long result;
	int rc;

	total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
			irq_arr_size;

	/* data should be aligned to 8 bytes in order for CPU-CP to copy it */
	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;

	/* total_pkt_size is casted to u16 later on */
	if (total_pkt_size > USHRT_MAX) {
		dev_err(hdev->dev, "too many elements in IRQ array\n");
		return -EINVAL;
	}

	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
	memcpy(&pkt->irqs, irq_arr, irq_arr_size);

	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
						CPUCP_PKT_CTL_OPCODE_SHIFT);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
						total_pkt_size, 0, &result);

	if (rc)
		dev_err(hdev->dev, "failed to unmask IRQ array\n");

	kfree(pkt);

	return rc;
}

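/**
 * hl_fw_test_cpu_queue() - Sanity check of the CPU queue.
 *
 * @hdev: pointer to hl_device structure.
 *
 * Sends a test packet and verifies that the firmware echoes back the
 * expected fence value.
 */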
int hl_fw_test_cpu_queue(struct hl_device *hdev)
{
	struct cpucp_packet test_pkt = {};
	long result;
	int rc;

	test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
					CPUCP_PKT_CTL_OPCODE_SHIFT);
	test_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
					sizeof(test_pkt), 0, &result);

	if (!rc) {
		if (result != CPUCP_PACKET_FENCE_VAL)
			dev_err(hdev->dev,
				"CPU queue test failed (0x%08lX)\n", result);
	} else {
		dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
	}

	return rc;
}

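/**
 * hl_fw_cpu_accessible_dma_pool_alloc() - Allocate from the CPU-accessible
 * DMA pool.
 *
 * @hdev: pointer to hl_device structure.
 * @size: allocation size in bytes.
 * @dma_handle: output DMA address of the allocation.
 *
 * Return: kernel virtual address of the allocation.
 */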
void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
						dma_addr_t *dma_handle)
{
	u64 kernel_addr;

	kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);

	*dma_handle = hdev->cpu_accessible_dma_address +
		(kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);

	return (void *) (uintptr_t) kernel_addr;
}

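/**
 * hl_fw_cpu_accessible_dma_pool_free() - Return an allocation to the
 * CPU-accessible DMA pool.
 *
 * @hdev: pointer to hl_device structure.
 * @size: size of the allocation in bytes.
 * @vaddr: kernel virtual address of the allocation.
 */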
void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
					void *vaddr)
{
	gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
			size);
}

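/**
 * hl_fw_send_heartbeat() - Check that the device CPU is alive.
 *
 * @hdev: pointer to hl_device structure.
 *
 * Return: 0 if the firmware answered the heartbeat (test) packet with the
 * expected fence value, -EIO otherwise.
 */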
int hl_fw_send_heartbeat(struct hl_device *hdev)
{
	struct cpucp_packet hb_pkt = {};
	long result;
	int rc;

	hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
					CPUCP_PKT_CTL_OPCODE_SHIFT);
	hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
					sizeof(hb_pkt), 0, &result);

	if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
		rc = -EIO;

	return rc;
}

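/**
 * hl_fw_cpucp_info_get() - Fetch the CPU-CP info structure from the firmware.
 *
 * @hdev: pointer to hl_device structure.
 *
 * Copies the firmware-filled cpucp_info into the ASIC properties and builds
 * the hwmon channel info from the reported sensors.
 */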
int hl_fw_cpucp_info_get(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct cpucp_packet pkt = {};
	void *cpucp_info_cpu_addr;
	dma_addr_t cpucp_info_dma_addr;
	long result;
	int rc;

	cpucp_info_cpu_addr =
			hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
					sizeof(struct cpucp_info),
					&cpucp_info_dma_addr);
	if (!cpucp_info_cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for CPU-CP info packet\n");
		return -ENOMEM;
	}

	memset(cpucp_info_cpu_addr, 0, sizeof(struct cpucp_info));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_INFO_GET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.addr = cpu_to_le64(cpucp_info_dma_addr);
	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_info));

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP info pkt, error %d\n", rc);
		goto out;
	}

	memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
			sizeof(prop->cpucp_info));

	rc = hl_build_hwmon_channel_info(hdev, prop->cpucp_info.sensors);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to build hwmon channel info, error %d\n", rc);
		rc = -EFAULT;
		goto out;
	}

out:
	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
			sizeof(struct cpucp_info), cpucp_info_cpu_addr);

	return rc;
}

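/**
 * hl_fw_get_eeprom_data() - Read the device EEPROM data through the firmware.
 *
 * @hdev: pointer to hl_device structure.
 * @data: output buffer.
 * @max_size: size of the output buffer in bytes.
 */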
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
{
	struct cpucp_packet pkt = {};
	void *eeprom_info_cpu_addr;
	dma_addr_t eeprom_info_dma_addr;
	long result;
	int rc;

	eeprom_info_cpu_addr =
			hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
					max_size, &eeprom_info_dma_addr);
	if (!eeprom_info_cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
		return -ENOMEM;
	}

	memset(eeprom_info_cpu_addr, 0, max_size);

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_EEPROM_DATA_GET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
	pkt.data_max_size = cpu_to_le32(max_size);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
			HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP EEPROM packet, error %d\n",
			rc);
		goto out;
	}

	/* result contains the actual size of the EEPROM data */
	memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));

out:
	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
			eeprom_info_cpu_addr);

	return rc;
}

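/**
 * hl_fw_cpucp_pci_counters_get() - Fetch PCIe counters from the firmware.
 *
 * @hdev: pointer to hl_device structure.
 * @counters: output structure for rx/tx throughput and replay count.
 */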
int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
				struct hl_info_pci_counters *counters)
{
	struct cpucp_packet pkt = {};
	long result;
	int rc;

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);

	/* Fetch PCI rx counter */
	pkt.index = cpu_to_le32(cpucp_pcie_throughput_rx);
	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
		return rc;
	}
	counters->rx_throughput = result;

	/* Fetch PCI tx counter */
	pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
		return rc;
	}
	counters->tx_throughput = result;

	/* Fetch PCI replay counter */
	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
		return rc;
	}
	counters->replay_cnt = (u32) result;

	return rc;
}

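/**
 * hl_fw_cpucp_total_energy_get() - Fetch the total energy consumption.
 *
 * @hdev: pointer to hl_device structure.
 * @total_energy: output for the total energy value reported by the firmware.
 */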
int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
{
	struct cpucp_packet pkt = {};
	long result;
	int rc;

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP total energy pkt, error %d\n",
			rc);
		return rc;
	}

	*total_energy = result;

	return rc;
}

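/**
 * fw_read_errors() - Report boot errors latched in the boot error register.
 *
 * @hdev: pointer to hl_device structure.
 * @boot_err0_reg: register holding the CPU boot error bits.
 */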
static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg)
{
	u32 err_val;

	/* The error register content is only valid when the ENABLED bit is
	 * set. Report every indication that is set; the skipped DRAM
	 * initialization and skipped BMC wait indications are warnings and
	 * are reported with a lower severity.
	 */
	err_val = RREG32(boot_err0_reg);
	if (!(err_val & CPU_BOOT_ERR0_ENABLED))
		return;

	if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
		dev_err(hdev->dev,
			"Device boot error - DRAM initialization failed\n");
	if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
		dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
	if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
		dev_err(hdev->dev,
			"Device boot error - Thermal Sensor initialization failed\n");
	if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
		dev_warn(hdev->dev,
			"Device boot warning - Skipped DRAM initialization\n");
	if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED)
		dev_warn(hdev->dev,
			"Device boot warning - Skipped waiting for BMC\n");
	if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
		dev_err(hdev->dev,
			"Device boot error - Serdes data from BMC not available\n");
	if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
		dev_err(hdev->dev,
			"Device boot error - NIC F/W initialization failed\n");
}

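/**
 * detect_cpu_boot_status() - Print a boot error matching the CPU boot status.
 *
 * @hdev: pointer to hl_device structure.
 * @status: value read from the CPU boot status register.
 */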
static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
{
	/* Translate the CPU boot status code into a human readable
	 * device boot error message
	 */
	switch (status) {
	case CPU_BOOT_STATUS_NA:
		dev_err(hdev->dev,
			"Device boot error - BTL did NOT run\n");
		break;
	case CPU_BOOT_STATUS_IN_WFE:
		dev_err(hdev->dev,
			"Device boot error - Stuck inside WFE loop\n");
		break;
	case CPU_BOOT_STATUS_IN_BTL:
		dev_err(hdev->dev,
			"Device boot error - Stuck in BTL\n");
		break;
	case CPU_BOOT_STATUS_IN_PREBOOT:
		dev_err(hdev->dev,
			"Device boot error - Stuck in Preboot\n");
		break;
	case CPU_BOOT_STATUS_IN_SPL:
		dev_err(hdev->dev,
			"Device boot error - Stuck in SPL\n");
		break;
	case CPU_BOOT_STATUS_IN_UBOOT:
		dev_err(hdev->dev,
			"Device boot error - Stuck in u-boot\n");
		break;
	case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
		dev_err(hdev->dev,
			"Device boot error - DRAM initialization failed\n");
		break;
	case CPU_BOOT_STATUS_UBOOT_NOT_READY:
		dev_err(hdev->dev,
			"Device boot error - u-boot stopped by user\n");
		break;
	case CPU_BOOT_STATUS_TS_INIT_FAIL:
		dev_err(hdev->dev,
			"Device boot error - Thermal Sensor initialization failed\n");
		break;
	default:
		dev_err(hdev->dev,
			"Device boot error - Invalid status code %d\n",
			status);
		break;
	}
}

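/**
 * hl_fw_read_preboot_ver() - Wait for preboot and read its version.
 *
 * @hdev: pointer to hl_device structure.
 * @cpu_boot_status_reg: CPU boot status register.
 * @boot_err0_reg: CPU boot error register.
 * @timeout: polling timeout in usec.
 *
 * Return: 0 on success, -EIO if the preboot did not reach a readable state.
 */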
int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg,
				u32 boot_err0_reg, u32 timeout)
{
	u32 status;
	int rc;

	if (!hdev->cpu_enable)
		return 0;

	/* Poll the boot status register until the preboot reports a state in
	 * which its version can be read: either it is waiting for the boot
	 * fit to be loaded, or it has already advanced to one of the later
	 * boot stages (u-boot, DRAM ready, NIC F/W ready, ready to boot or
	 * SRAM available).
	 */
	rc = hl_poll_timeout(
		hdev,
		cpu_boot_status_reg,
		status,
		(status == CPU_BOOT_STATUS_IN_UBOOT) ||
		(status == CPU_BOOT_STATUS_DRAM_RDY) ||
		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
		(status == CPU_BOOT_STATUS_SRAM_AVAIL) ||
		(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
		10000,
		timeout);

	if (rc) {
		dev_err(hdev->dev, "Failed to read preboot version\n");
		detect_cpu_boot_status(hdev, status);
		fw_read_errors(hdev, boot_err0_reg);
		return -EIO;
	}

	hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT);

	return 0;
}

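/**
 * hl_fw_init_cpu() - Handle the device CPU boot flow.
 *
 * @hdev: pointer to hl_device structure.
 * @cpu_boot_status_reg: CPU boot status register.
 * @msg_to_cpu_reg: register used to send messages to the device CPU.
 * @cpu_msg_status_reg: register holding the device CPU message status.
 * @boot_err0_reg: CPU boot error register.
 * @skip_bmc: true to instruct the firmware to skip waiting for the BMC.
 * @cpu_timeout: timeout in usec for the CPU boot stages.
 * @boot_fit_timeout: timeout in usec for the boot fit load flow.
 *
 * Loads the boot fit if the firmware requests it, optionally skips the BMC
 * wait, loads the main firmware and waits for the device to report that its
 * SRAM is available.
 *
 * Return: 0 on success, negative errno otherwise.
 */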
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
			u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
			u32 boot_err0_reg, bool skip_bmc,
			u32 cpu_timeout, u32 boot_fit_timeout)
{
	u32 status;
	int rc;

	dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
		cpu_timeout / USEC_PER_SEC);

	/* Wait for boot fit request */
	rc = hl_poll_timeout(
		hdev,
		cpu_boot_status_reg,
		status,
		status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
		10000,
		boot_fit_timeout);

	if (rc) {
		dev_dbg(hdev->dev,
			"No boot fit request received, resuming boot\n");
	} else {
		rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
		if (rc)
			goto out;

		/* Clear device CPU message status */
		WREG32(cpu_msg_status_reg, CPU_MSG_CLR);

		/* Signal device CPU that boot fit is ready */
		WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);

		/* Poll for CPU device ack */
		rc = hl_poll_timeout(
			hdev,
			cpu_msg_status_reg,
			status,
			status == CPU_MSG_OK,
			10000,
			boot_fit_timeout);

		if (rc) {
			dev_err(hdev->dev,
				"Timeout waiting for boot fit load ack\n");
			goto out;
		}

		/* Clear message */
		WREG32(msg_to_cpu_reg, KMD_MSG_NA);
	}

	/* Make sure CPU boot-loader is running */
	rc = hl_poll_timeout(
		hdev,
		cpu_boot_status_reg,
		status,
		(status == CPU_BOOT_STATUS_DRAM_RDY) ||
		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		10000,
		cpu_timeout);

	/* Read U-Boot version now in case we will later fail */
	hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);

	if (rc) {
		detect_cpu_boot_status(hdev, status);
		rc = -EIO;
		goto out;
	}

	if (!hdev->fw_loading) {
		dev_info(hdev->dev, "Skip loading FW\n");
		goto out;
	}

	if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
		goto out;

	dev_info(hdev->dev,
		"Loading firmware to device, may take some time...\n");

	rc = hdev->asic_funcs->load_firmware_to_device(hdev);
	if (rc)
		goto out;

	if (skip_bmc) {
		WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);

		rc = hl_poll_timeout(
			hdev,
			cpu_boot_status_reg,
			status,
			(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
			10000,
			cpu_timeout);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to get ACK on skipping BMC, %d\n",
				status);
			WREG32(msg_to_cpu_reg, KMD_MSG_NA);
			rc = -EIO;
			goto out;
		}
	}

	WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);

	rc = hl_poll_timeout(
		hdev,
		cpu_boot_status_reg,
		status,
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		10000,
		cpu_timeout);

	/* Clear message */
	WREG32(msg_to_cpu_reg, KMD_MSG_NA);

	if (rc) {
		if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
			dev_err(hdev->dev,
				"Device reports FIT image is corrupted\n");
		else
			dev_err(hdev->dev,
				"Failed to load firmware to device, %d\n",
				status);

		rc = -EIO;
		goto out;
	}

	dev_info(hdev->dev, "Successfully loaded firmware to device\n");

out:
	fw_read_errors(hdev, boot_err0_reg);

	return rc;
}