// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt)			"habanalabs: " fmt

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/pci.h>
#include <linux/hwmon.h>

#define HL_RESET_DELAY_USEC		10000	/* wait 10ms */

enum hl_device_status hl_device_status(struct hl_device *hdev)
{
	enum hl_device_status status;

	if (hdev->reset_info.in_reset)
		status = HL_DEVICE_STATUS_IN_RESET;
	else if (hdev->reset_info.needs_reset)
		status = HL_DEVICE_STATUS_NEEDS_RESET;
	else if (hdev->disabled)
		status = HL_DEVICE_STATUS_MALFUNCTION;
	else if (!hdev->init_done)
		status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
	else
		status = HL_DEVICE_STATUS_OPERATIONAL;

	return status;
}

bool hl_device_operational(struct hl_device *hdev,
		enum hl_device_status *status)
{
	enum hl_device_status current_status;

	current_status = hl_device_status(hdev);
	if (status)
		*status = current_status;

	switch (current_status) {
	case HL_DEVICE_STATUS_IN_RESET:
	case HL_DEVICE_STATUS_MALFUNCTION:
	case HL_DEVICE_STATUS_NEEDS_RESET:
		return false;
	case HL_DEVICE_STATUS_OPERATIONAL:
	case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
	default:
		return true;
	}
}

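/* hpriv_release() - release a process's private data.
 * @ref: kref embedded in the hl_fpriv object.
 *
 * Called by kref_put() when the last reference to the process's hl_fpriv is
 * dropped, i.e. the process has closed its FD and all the command
 * submissions it issued have completed.
 */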
static void hpriv_release(struct kref *ref)
{
	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
	bool device_is_idle = true;
	struct hl_fpriv *hpriv;
	struct hl_device *hdev;

	hpriv = container_of(ref, struct hl_fpriv, refcount);

	hdev = hpriv->hdev;

	put_pid(hpriv->taskpid);

	hl_debugfs_remove_file(hpriv);

	mutex_destroy(&hpriv->restore_phase_mutex);

	if ((!hdev->pldm) && (hdev->pdev) &&
			(!hdev->asic_funcs->is_device_idle(hdev,
				idle_mask,
				HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL))) {
		dev_err(hdev->dev,
			"device not idle after user context is closed (0x%llx_%llx)\n",
			idle_mask[1], idle_mask[0]);

		device_is_idle = false;
	}

	/* We need to remove the user from the list to make sure the reset
	 * process won't try to kill the user process. If we got here, it
	 * means there are no more driver/device resources that the user
	 * process is occupying, so there is no need to kill it.
	 *
	 * However, we can't mark the compute context as inactive at this
	 * stage. This is to prevent a new process from opening the device
	 * while an existing process is still releasing its resources.
	 */
	mutex_lock(&hdev->fpriv_list_lock);
	list_del(&hpriv->dev_node);
	mutex_unlock(&hdev->fpriv_list_lock);

	if ((hdev->reset_if_device_not_idle && !device_is_idle)
			|| hdev->reset_upon_device_release)
		hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);

	/* Now we can mark the compute context as not active. Even if a reset
	 * is running in a different thread, we don't care because in_reset is
	 * marked, so if a user tries to open the device it will fail on that,
	 * even if is_compute_ctx_active is false.
	 */
	mutex_lock(&hdev->fpriv_list_lock);
	hdev->is_compute_ctx_active = false;
	mutex_unlock(&hdev->fpriv_list_lock);

	kfree(hpriv);
}

void hl_hpriv_get(struct hl_fpriv *hpriv)
{
	kref_get(&hpriv->refcount);
}

int hl_hpriv_put(struct hl_fpriv *hpriv)
{
	return kref_put(&hpriv->refcount, hpriv_release);
}

/*
 * hl_device_release - release function for habanalabs device
 *
 * @inode: pointer to inode structure
 * @filp: pointer to file structure
 *
 * Called when a process closes a habanalabs device
 */
static int hl_device_release(struct inode *inode, struct file *filp)
{
	struct hl_fpriv *hpriv = filp->private_data;
	struct hl_device *hdev = hpriv->hdev;

	filp->private_data = NULL;

	if (!hdev) {
		pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
		put_pid(hpriv->taskpid);
		return 0;
	}

	/* Each pending user interrupt holds the user's context, hence we
	 * must release them all before calling hl_ctx_mgr_fini().
	 */
	hl_release_pending_user_interrupts(hpriv->hdev);

	hl_cb_mgr_fini(hdev, &hpriv->cb_mgr);
	hl_ts_mgr_fini(hpriv->hdev, &hpriv->ts_mem_mgr);
	hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);

	if (!hl_hpriv_put(hpriv))
		dev_notice(hdev->dev,
			"User process closed FD but device still in use\n");

	hdev->last_open_session_duration_jif =
		jiffies - hdev->last_successful_open_jif;

	return 0;
}

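/*
 * hl_device_release_ctrl - release function for habanalabs control device
 *
 * @inode: pointer to inode structure
 * @filp: pointer to file structure
 *
 * Called when a process closes a habanalabs control device
 */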
static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
{
	struct hl_fpriv *hpriv = filp->private_data;
	struct hl_device *hdev = hpriv->hdev;

	filp->private_data = NULL;

	if (!hdev) {
		pr_err("Closing FD after device was removed\n");
		goto out;
	}

	mutex_lock(&hdev->fpriv_ctrl_list_lock);
	list_del(&hpriv->dev_node);
	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
out:
	put_pid(hpriv->taskpid);

	kfree(hpriv);

	return 0;
}

/*
 * hl_mmap - mmap function for habanalabs device
 *
 * @filp: pointer to file structure
 * @vma: pointer to vm_area_struct of the process
 *
 * Called when a process does an mmap on a habanalabs device. Calls the
 * relevant mmap function at the end of the common code.
 */
static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct hl_fpriv *hpriv = filp->private_data;
	struct hl_device *hdev = hpriv->hdev;
	unsigned long vm_pgoff;

	if (!hdev) {
		pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");
		return -ENODEV;
	}

	/* The mmap offset encodes the type of the mapped object in its upper
	 * bits; mask them out so the handlers see a plain page offset.
	 */
	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);

	switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
	case HL_MMAP_TYPE_CB:
		return hl_cb_mmap(hpriv, vma);

	case HL_MMAP_TYPE_BLOCK:
		return hl_hw_block_mmap(hpriv, vma);

	case HL_MMAP_TYPE_TS_BUFF:
		return hl_ts_mmap(hpriv, vma);
	}

	return -EINVAL;
}

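/* File operations of the compute device. The control device exposes a
 * reduced set (no mmap, control ioctls only), as it is intended for
 * monitoring tools rather than for submitting work.
 */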
static const struct file_operations hl_ops = {
	.owner = THIS_MODULE,
	.open = hl_device_open,
	.release = hl_device_release,
	.mmap = hl_mmap,
	.unlocked_ioctl = hl_ioctl,
	.compat_ioctl = hl_ioctl
};

static const struct file_operations hl_ctrl_ops = {
	.owner = THIS_MODULE,
	.open = hl_device_open_ctrl,
	.release = hl_device_release_ctrl,
	.unlocked_ioctl = hl_ioctl_control,
	.compat_ioctl = hl_ioctl_control
};

static void device_release_func(struct device *dev)
{
	kfree(dev);
}

/*
 * device_init_cdev - Initialize cdev and device for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 * @hclass: pointer to the class object of the device
 * @minor: minor number of the specific device
 * @fops: file operations to install for this device
 * @name: name of the device as it will appear in the filesystem
 * @cdev: pointer to the char device object that will be initialized
 * @dev: pointer to the device object that will be initialized
 *
 * Initialize a cdev and a Linux device for habanalabs's device.
 */
static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
				int minor, const struct file_operations *fops,
				char *name, struct cdev *cdev,
				struct device **dev)
{
	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;

	*dev = kzalloc(sizeof(**dev), GFP_KERNEL);
	if (!*dev)
		return -ENOMEM;

	device_initialize(*dev);
	(*dev)->devt = MKDEV(hdev->major, minor);
	(*dev)->class = hclass;
	(*dev)->release = device_release_func;
	dev_set_drvdata(*dev, hdev);
	dev_set_name(*dev, "%s", name);

	return 0;
}

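/* device_cdev_sysfs_add() - add the char devices and sysfs nodes of the
 * compute and control devices to the system, in that order, rolling back
 * on failure.
 */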
static int device_cdev_sysfs_add(struct hl_device *hdev)
{
	int rc;

	rc = cdev_device_add(&hdev->cdev, hdev->dev);
	if (rc) {
		dev_err(hdev->dev,
			"failed to add a char device to the system\n");
		return rc;
	}

	rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
	if (rc) {
		dev_err(hdev->dev,
			"failed to add a control char device to the system\n");
		goto delete_cdev_device;
	}

	/* hl_sysfs_init() must be done after adding the device to the system */
	rc = hl_sysfs_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize sysfs\n");
		goto delete_ctrl_cdev_device;
	}

	hdev->cdev_sysfs_created = true;

	return 0;

delete_ctrl_cdev_device:
	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
delete_cdev_device:
	cdev_device_del(&hdev->cdev, hdev->dev);
	return rc;
}

static void device_cdev_sysfs_del(struct hl_device *hdev)
{
	if (!hdev->cdev_sysfs_created)
		goto put_devices;

	hl_sysfs_fini(hdev);
	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
	cdev_device_del(&hdev->cdev, hdev->dev);

put_devices:
	put_device(hdev->dev);
	put_device(hdev->dev_ctrl);
}

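/* device_hard_reset_pending() - delayed-work handler that performs a hard
 * reset from a dedicated thread. If the reset can't be done yet (e.g. open
 * processes have not exited), it re-queues itself, unless the device is
 * being removed.
 */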
static void device_hard_reset_pending(struct work_struct *work)
{
	struct hl_device_reset_work *device_reset_work =
		container_of(work, struct hl_device_reset_work, reset_work.work);
	struct hl_device *hdev = device_reset_work->hdev;
	u32 flags;
	int rc;

	flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;

	rc = hl_device_reset(hdev, flags);
	if ((rc == -EBUSY) && !hdev->device_fini_pending) {
		dev_info(hdev->dev,
			"Could not reset device. Will try again in %u seconds",
			HL_PENDING_RESET_PER_SEC);

		queue_delayed_work(device_reset_work->wq,
			&device_reset_work->reset_work,
			msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
	}
}

/*
 * device_early_init - do some early initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Install the relevant function pointers and call the early_init function,
 * if such a function exists.
 */
static int device_early_init(struct hl_device *hdev)
{
	int i, rc;
	char workq_name[32];

	switch (hdev->asic_type) {
	case ASIC_GOYA:
		goya_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI:
		gaudi_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
		break;
	case ASIC_GAUDI_SEC:
		gaudi_set_asic_funcs(hdev);
		strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
		break;
	default:
		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
			hdev->asic_type);
		return -EINVAL;
	}

	rc = hdev->asic_funcs->early_init(hdev);
	if (rc)
		return rc;

	rc = hl_asid_init(hdev);
	if (rc)
		goto early_fini;

	if (hdev->asic_prop.completion_queues_count) {
		hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
				sizeof(*hdev->cq_wq),
				GFP_KERNEL);
		if (!hdev->cq_wq) {
			rc = -ENOMEM;
			goto asid_fini;
		}
	}

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
		snprintf(workq_name, sizeof(workq_name), "hl-free-jobs-%u", (u32) i);
		hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
		if (hdev->cq_wq[i] == NULL) {
			dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
			rc = -ENOMEM;
			goto free_cq_wq;
		}
	}

	hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
	if (hdev->eq_wq == NULL) {
		dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
		rc = -ENOMEM;
		goto free_cq_wq;
	}

	hdev->ts_free_obj_wq = alloc_workqueue("hl-ts-free-obj", WQ_UNBOUND, 0);
	if (!hdev->ts_free_obj_wq) {
		dev_err(hdev->dev,
			"Failed to allocate Timestamp registration free workqueue\n");
		rc = -ENOMEM;
		goto free_eq_wq;
	}

	hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
					GFP_KERNEL);
	if (!hdev->hl_chip_info) {
		rc = -ENOMEM;
		goto free_ts_free_wq;
	}

	rc = hl_mmu_if_set_funcs(hdev);
	if (rc)
		goto free_chip_info;

	hl_cb_mgr_init(&hdev->kernel_cb_mgr);

	hdev->device_reset_work.wq =
			create_singlethread_workqueue("hl_device_reset");
	if (!hdev->device_reset_work.wq) {
		rc = -ENOMEM;
		dev_err(hdev->dev, "Failed to create device reset WQ\n");
		goto free_cb_mgr;
	}

	INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work,
			device_hard_reset_pending);
	hdev->device_reset_work.hdev = hdev;
	hdev->device_fini_pending = 0;

	mutex_init(&hdev->send_cpu_message_lock);
	mutex_init(&hdev->debug_lock);
	INIT_LIST_HEAD(&hdev->cs_mirror_list);
	spin_lock_init(&hdev->cs_mirror_lock);
	spin_lock_init(&hdev->reset_info.lock);
	INIT_LIST_HEAD(&hdev->fpriv_list);
	INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
	mutex_init(&hdev->fpriv_list_lock);
	mutex_init(&hdev->fpriv_ctrl_list_lock);
	mutex_init(&hdev->clk_throttling.lock);

	return 0;

free_cb_mgr:
	hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
free_chip_info:
	kfree(hdev->hl_chip_info);
free_ts_free_wq:
	destroy_workqueue(hdev->ts_free_obj_wq);
free_eq_wq:
	destroy_workqueue(hdev->eq_wq);
free_cq_wq:
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		if (hdev->cq_wq[i])
			destroy_workqueue(hdev->cq_wq[i]);
	kfree(hdev->cq_wq);
asid_fini:
	hl_asid_fini(hdev);
early_fini:
	if (hdev->asic_funcs->early_fini)
		hdev->asic_funcs->early_fini(hdev);

	return rc;
}

/*
 * device_early_fini - finalize all that was done in device_early_init
 *
 * @hdev: pointer to habanalabs device structure
 */
static void device_early_fini(struct hl_device *hdev)
{
	int i;

	mutex_destroy(&hdev->debug_lock);
	mutex_destroy(&hdev->send_cpu_message_lock);

	mutex_destroy(&hdev->fpriv_list_lock);
	mutex_destroy(&hdev->fpriv_ctrl_list_lock);

	mutex_destroy(&hdev->clk_throttling.lock);

	hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);

	kfree(hdev->hl_chip_info);

	destroy_workqueue(hdev->ts_free_obj_wq);
	destroy_workqueue(hdev->eq_wq);
	destroy_workqueue(hdev->device_reset_work.wq);

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		destroy_workqueue(hdev->cq_wq[i]);
	kfree(hdev->cq_wq);

	hl_asid_fini(hdev);

	if (hdev->asic_funcs->early_fini)
		hdev->asic_funcs->early_fini(hdev);
}

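/* hl_device_heartbeat() - periodic work that pings the device CPU. If the
 * device is operational but fails to answer, a hard reset is initiated with
 * the heartbeat flag set.
 */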
static void hl_device_heartbeat(struct work_struct *work)
{
	struct hl_device *hdev = container_of(work, struct hl_device,
						work_heartbeat.work);

	if (!hl_device_operational(hdev, NULL))
		goto reschedule;

	if (!hdev->asic_funcs->send_heartbeat(hdev))
		goto reschedule;

	if (hl_device_operational(hdev, NULL))
		dev_err(hdev->dev, "Device heartbeat failed!\n");

	hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);

	return;

reschedule:
	/*
	 * prev_reset_trigger tracks consecutive fatal H/W errors until the
	 * first heartbeat immediately post reset.
	 * If control reached here, then at least one heartbeat work has been
	 * scheduled since the last reset/init cycle.
	 * So if the device is not already in a reset cycle, clear
	 * prev_reset_trigger, as no reset occurred with
	 * HL_DRV_RESET_FW_FATAL_ERR status for at least one heartbeat. From
	 * this point the driver restarts tracking future consecutive fatal
	 * errors.
	 */
	if (!hdev->reset_info.in_reset)
		hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;

	schedule_delayed_work(&hdev->work_heartbeat,
			usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}

/*
 * device_late_init - do late stuff initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Do stuff that either needs the device H/W queues to be running or needs
 * to happen after all the rest of the initialization is finished.
 */
static int device_late_init(struct hl_device *hdev)
{
	int rc;

	if (hdev->asic_funcs->late_init) {
		rc = hdev->asic_funcs->late_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"failed late initialization for the H/W\n");
			return rc;
		}
	}

	hdev->high_pll = hdev->asic_prop.high_pll;

	if (hdev->heartbeat) {
		INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
		schedule_delayed_work(&hdev->work_heartbeat,
				usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
	}

	hdev->late_init_done = true;

	return 0;
}

/*
 * device_late_fini - finalize all that was done in device_late_init
 *
 * @hdev: pointer to habanalabs device structure
 */
static void device_late_fini(struct hl_device *hdev)
{
	if (!hdev->late_init_done)
		return;

	if (hdev->heartbeat)
		cancel_delayed_work_sync(&hdev->work_heartbeat);

	if (hdev->asic_funcs->late_fini)
		hdev->asic_funcs->late_fini(hdev);

	hdev->late_init_done = false;
}

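/* hl_device_utilization() - compute device utilization as the current power
 * draw relative to the span between idle (DC) power and maximum power:
 *
 *	utilization = (curr_power - dc_power) * 100 / (max_power - dc_power)
 *
 * The current power is clamped to [dc_power, max_power] first, so the
 * result is always in the range 0..100.
 */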
int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
{
	u64 max_power, curr_power, dc_power, dividend;
	int rc;

	max_power = hdev->max_power;
	dc_power = hdev->asic_prop.dc_power_default;
	rc = hl_fw_cpucp_power_get(hdev, &curr_power);

	if (rc)
		return rc;

	curr_power = clamp(curr_power, dc_power, max_power);

	dividend = (curr_power - dc_power) * 100;
	*utilization = (u32) div_u64(dividend, (max_power - dc_power));

	return 0;
}

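/* hl_device_set_debug_mode() - enable or disable debug mode for a context.
 * Disabling also halts the CoreSight infrastructure, unless a hard reset is
 * already pending, in which case the H/W will be reset anyway.
 */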
int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)
{
	int rc = 0;

	mutex_lock(&hdev->debug_lock);

	if (!enable) {
		if (!hdev->in_debug) {
			dev_err(hdev->dev,
				"Failed to disable debug mode because device was not in debug mode\n");
			rc = -EFAULT;
			goto out;
		}

		if (!hdev->reset_info.hard_reset_pending)
			hdev->asic_funcs->halt_coresight(hdev, ctx);

		hdev->in_debug = 0;

		goto out;
	}

	if (hdev->in_debug) {
		dev_err(hdev->dev,
			"Failed to enable debug mode because device is already in debug mode\n");
		rc = -EFAULT;
		goto out;
	}

	hdev->in_debug = 1;

out:
	mutex_unlock(&hdev->debug_lock);

	return rc;
}

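/* take_release_locks() - flush the driver's critical sections by taking and
 * immediately releasing each relevant lock. Any thread already inside one of
 * these sections is guaranteed to have left it by the time the corresponding
 * lock/unlock pair returns.
 */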
static void take_release_locks(struct hl_device *hdev)
{
	/* Flush anyone that is inside the critical section of enqueue
	 * jobs to the H/W
	 */
	hdev->asic_funcs->hw_queues_lock(hdev);
	hdev->asic_funcs->hw_queues_unlock(hdev);

	/* Flush processes that are sending message to CPU */
	mutex_lock(&hdev->send_cpu_message_lock);
	mutex_unlock(&hdev->send_cpu_message_lock);

	/* Flush anyone that is inside device open */
	mutex_lock(&hdev->fpriv_list_lock);
	mutex_unlock(&hdev->fpriv_list_lock);
	mutex_lock(&hdev->fpriv_ctrl_list_lock);
	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}

static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
				bool skip_wq_flush)
{
	if (hard_reset)
		device_late_fini(hdev);

	/*
	 * Halt the engines and disable interrupts so we won't get any more
	 * completions from H/W and we won't have any accesses from the
	 * H/W to the host machine
	 */
	hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);

	/* Go over all the queues, release all CS and their jobs */
	hl_cs_rollback_all(hdev, skip_wq_flush);

	/* Release all pending user interrupts, each pending user interrupt
	 * holds a reference to a user context.
	 */
	hl_release_pending_user_interrupts(hdev);
}

/*
 * hl_device_suspend - initiate device suspend
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int hl_device_suspend(struct hl_device *hdev)
{
	int rc;

	pci_save_state(hdev->pdev);

	/* Block future CS/VM/JOB completion operations */
	spin_lock(&hdev->reset_info.lock);
	if (hdev->reset_info.in_reset) {
		spin_unlock(&hdev->reset_info.lock);
		dev_err(hdev->dev, "Can't suspend while in reset\n");
		return -EIO;
	}
	hdev->reset_info.in_reset = 1;
	spin_unlock(&hdev->reset_info.lock);

	/* This blocks all other stuff that is not blocked by in_reset */
	hdev->disabled = true;

	take_release_locks(hdev);

	rc = hdev->asic_funcs->suspend(hdev);
	if (rc)
		dev_err(hdev->dev,
			"Failed to disable PCI access of device CPU\n");

	/* Shut down the device */
	pci_disable_device(hdev->pdev);
	pci_set_power_state(hdev->pdev, PCI_D3hot);

	return 0;
}

/*
 * hl_device_resume - initiate device resume
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int hl_device_resume(struct hl_device *hdev)
{
	int rc;

	pci_set_power_state(hdev->pdev, PCI_D0);
	pci_restore_state(hdev->pdev);
	rc = pci_enable_device_mem(hdev->pdev);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to enable PCI device in resume\n");
		return rc;
	}

	pci_set_master(hdev->pdev);

	rc = hdev->asic_funcs->resume(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to resume device after suspend\n");
		goto disable_device;
	}

	/* 'in_reset' was set to true during suspend, now we must clear it
	 * in order for hard reset to be performed
	 */
	hdev->reset_info.in_reset = 0;

	rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
	if (rc) {
		dev_err(hdev->dev, "Failed to reset device during resume\n");
		goto disable_device;
	}

	return 0;

disable_device:
	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);

	return rc;
}

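/* device_kill_open_processes() - send SIGKILL to every process that still
 * has an open FD on the (compute or control) device and wait for them to
 * exit. Returns 0 when the list is empty, -EBUSY to ask the reset thread to
 * retry, or -ETIME when the driver gives up waiting.
 */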
static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
{
	struct task_struct *task = NULL;
	struct list_head *fd_list;
	struct hl_fpriv *hpriv;
	struct mutex *fd_lock;
	u32 pending_cnt;

	fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
	fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;

	/* Giving time for user to close FD, and for processes that are inside
	 * hl_device_open to finish
	 */
	if (!list_empty(fd_list))
		ssleep(1);

	if (timeout) {
		pending_cnt = timeout;
	} else {
		if (hdev->process_kill_trial_cnt) {
			/* Processes have been already killed */
			pending_cnt = 1;
			goto wait_for_processes;
		} else {
			/* Wait a small period after process kill */
			pending_cnt = HL_PENDING_RESET_PER_SEC;
		}
	}

	mutex_lock(fd_lock);

	/* This section must be protected because we are dereferencing
	 * pointers that are freed if the process exits
	 */
	list_for_each_entry(hpriv, fd_list, dev_node) {
		task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
		if (task) {
			dev_info(hdev->dev, "Killing user process pid=%d\n",
				task_pid_nr(task));
			send_sig(SIGKILL, task, 1);
			usleep_range(1000, 10000);

			put_task_struct(task);
		} else {
			dev_warn(hdev->dev,
				"Can't get task struct for PID so giving up on killing process\n");
			mutex_unlock(fd_lock);
			return -ETIME;
		}
	}

	mutex_unlock(fd_lock);

	/*
	 * We killed the open processes, but that doesn't mean they are closed.
	 * They could be running a long cleanup phase in the driver, e.g. MMU
	 * unmappings, or other long teardown flows, even before our signal
	 * made it to them.
	 * Therefore we need to wait again to make sure they are closed before
	 * continuing with the reset.
	 */
wait_for_processes:
	while ((!list_empty(fd_list)) && (pending_cnt)) {
		dev_dbg(hdev->dev,
			"Waiting for all unmap operations to finish before hard reset\n");

		pending_cnt--;

		ssleep(1);
	}

	/* All processes exited successfully */
	if (list_empty(fd_list))
		return 0;

	/* Give up waiting for processes to exit */
	if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
		return -ETIME;

	hdev->process_kill_trial_cnt++;

	return -EBUSY;
}

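/* device_disable_open_processes() - clear the hdev pointer of every open FD,
 * so that subsequent file operations on those FDs fail gracefully instead of
 * touching a device that is being removed.
 */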
static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
{
	struct list_head *fd_list;
	struct hl_fpriv *hpriv;
	struct mutex *fd_lock;

	fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
	fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;

	mutex_lock(fd_lock);
	list_for_each_entry(hpriv, fd_list, dev_node)
		hpriv->hdev = NULL;
	mutex_unlock(fd_lock);
}

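/* handle_reset_trigger() - record the cause of the coming reset, track
 * whether the same trigger fires twice in a row, and, for a hard reset that
 * the F/W is not handling itself, tell the F/W to stop accessing PCI.
 */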
static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
{
	u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;

	/*
	 * 'reset cause' is being updated here, because getting here
	 * means that it's the 1st time and the last time we're here
	 * ('in_reset' makes sure of it). This makes sure that
	 * 'reset_cause' will continue holding its 1st recorded reason!
	 */
	if (flags & HL_DRV_RESET_HEARTBEAT) {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
		cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
	} else if (flags & HL_DRV_RESET_TDR) {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
		cur_reset_trigger = HL_DRV_RESET_TDR;
	} else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
		cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
	} else {
		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
	}

	/*
	 * If the same reset trigger fires twice in a row,
	 * reset_trigger_repeated is set.
	 */
	if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
		hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
		hdev->reset_info.reset_trigger_repeated = 0;
	} else {
		hdev->reset_info.reset_trigger_repeated = 1;
	}

	/* If reset is due to heartbeat, the device CPU is no longer
	 * responsive, in which case there is no point sending a PCI disable
	 * message to it.
	 *
	 * If F/W is performing the reset, no need to send it a message to
	 * disable PCI access.
	 */
	if ((flags & HL_DRV_RESET_HARD) &&
			!(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
		/* Disable PCI access from device F/W so it won't send
		 * us additional interrupts. We disable MSI/MSI-X at
		 * the halt_engines function and we can't have the F/W
		 * sending us interrupts after that. We need to disable
		 * the access here because if the device is marked
		 * disabled, the message won't be sent. Also, in case
		 * of heartbeat, the device CPU is marked as disabled
		 * so this message won't be sent.
		 */
		if (hl_fw_send_pci_access_msg(hdev,
				CPUCP_PACKET_DISABLE_PCI_ACCESS))
			dev_warn(hdev->dev,
				"Failed to disable PCI access by F/W\n");
	}
}

/*
 * hl_device_reset - reset the device
 *
 * @hdev: pointer to habanalabs device structure
 * @flags: reset flags
 *
 * Block future CS and wait for pending CS to be enqueued
 * Call ASIC H/W fini
 * Flush all completions
 * Re-initialize all internal data structures
 * Call ASIC H/W init, late_init
 * Test queues
 * Enable device
 *
 * Returns 0 for success or an error on failure.
 */
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
	bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
			reset_upon_device_release = false, schedule_hard_reset = false,
			skip_wq_flush, delay_reset;
	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
	struct hl_ctx *ctx;
	int i, rc;

	if (!hdev->init_done) {
		dev_err(hdev->dev, "Can't reset before initialization is done\n");
		return 0;
	}

	hard_reset = !!(flags & HL_DRV_RESET_HARD);
	from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
	fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
	skip_wq_flush = !!(flags & HL_DRV_RESET_DEV_RELEASE);
	delay_reset = !!(flags & HL_DRV_RESET_DELAY);

	if (!hard_reset && !hdev->asic_prop.supports_soft_reset) {
		hard_instead_soft = true;
		hard_reset = true;
	}

	if (hdev->reset_upon_device_release && (flags & HL_DRV_RESET_DEV_RELEASE)) {
		if (hard_reset) {
			dev_crit(hdev->dev,
				"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
			return -EINVAL;
		}

		reset_upon_device_release = true;

		goto do_reset;
	}

	if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
		hard_instead_soft = true;
		hard_reset = true;
	}

	if (hard_instead_soft)
		dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");

do_reset:
	/* Re-entry of reset thread */
	if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
		goto kill_processes;

	/*
	 * Prevent concurrency in this function - only one reset should be
	 * done at any given time. We need to perform this only if we didn't
	 * get here from the dedicated hard reset thread.
	 */
	if (!from_hard_reset_thread) {
		/* Block future CS/VM/JOB completion operations */
		spin_lock(&hdev->reset_info.lock);
		if (hdev->reset_info.in_reset) {
			/* We only allow scheduling of a hard reset during soft reset */
			if (hard_reset && hdev->reset_info.is_in_soft_reset)
				hdev->reset_info.hard_reset_schedule_flags = flags;
			spin_unlock(&hdev->reset_info.lock);
			return 0;
		}
		hdev->reset_info.in_reset = 1;
		spin_unlock(&hdev->reset_info.lock);

		if (delay_reset)
			usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);

		handle_reset_trigger(hdev, flags);

		/* This still allows the completion of some KDMA ops */
		hdev->reset_info.is_in_soft_reset = !hard_reset;

		/* This also blocks future CS/VM/JOB completion operations */
		hdev->disabled = true;

		take_release_locks(hdev);

		if (hard_reset)
			dev_info(hdev->dev, "Going to reset device\n");
		else if (reset_upon_device_release)
			dev_info(hdev->dev, "Going to reset device after release by user\n");
		else
			dev_info(hdev->dev, "Going to reset engines of inference device\n");
	}

again:
	if ((hard_reset) && (!from_hard_reset_thread)) {
		hdev->reset_info.hard_reset_pending = true;

		hdev->process_kill_trial_cnt = 0;

		hdev->device_reset_work.flags = flags;

		/*
		 * Because the reset function can't run from heartbeat work,
		 * we need to call the reset function from a dedicated work.
		 */
		queue_delayed_work(hdev->device_reset_work.wq,
			&hdev->device_reset_work.reset_work, 0);

		return 0;
	}

	cleanup_resources(hdev, hard_reset, fw_reset, skip_wq_flush);

kill_processes:
	if (hard_reset) {
		/* Kill processes here after CS rollback. This is because the
		 * process can't really exit until all its CSs are done, which
		 * is what we do in cs rollback
		 */
		rc = device_kill_open_processes(hdev, 0, false);

		if (rc == -EBUSY) {
			if (hdev->device_fini_pending) {
				dev_crit(hdev->dev,
					"Failed to kill all open processes, stopping hard reset\n");
				goto out_err;
			}

			/* signal reset thread to reschedule */
			return rc;
		}

		if (rc) {
			dev_crit(hdev->dev,
				"Failed to kill all open processes, stopping hard reset\n");
			goto out_err;
		}

		/* Flush the Event queue workers to make sure no other thread is
		 * reading or writing to registers during the reset
		 */
		flush_workqueue(hdev->eq_wq);
	}

	/* Reset the H/W. It will be in idle state after this returns */
	hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);

	if (hard_reset) {
		hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;

		/* Release kernel context */
		if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
			hdev->kernel_ctx = NULL;

		hl_vm_fini(hdev);
		hl_mmu_fini(hdev);
		hl_eq_reset(hdev, &hdev->event_queue);
	}

	/* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
	hl_hw_queue_reset(hdev, hard_reset);
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		hl_cq_reset(hdev, &hdev->completion_queue[i]);

	/* Make sure the context switch phase will run again */
	ctx = hl_get_compute_ctx(hdev);
	if (ctx) {
		atomic_set(&ctx->thread_ctx_switch_token, 1);
		ctx->thread_ctx_switch_wait_token = 0;
		hl_ctx_put(ctx);
	}

	/* Finished tear-down, starting to re-initialize */

	if (hard_reset) {
		hdev->device_cpu_disabled = false;
		hdev->reset_info.hard_reset_pending = false;

		if (hdev->reset_info.reset_trigger_repeated &&
				(hdev->reset_info.prev_reset_trigger ==
						HL_DRV_RESET_FW_FATAL_ERR)) {
			/* If there are two back-to-back resets from F/W,
			 * ensure the driver puts the device in an unusable
			 * state.
			 */
			dev_crit(hdev->dev,
				"Consecutive FW fatal errors received, stopping hard reset\n");
			rc = -EIO;
			goto out_err;
		}

		if (hdev->kernel_ctx) {
			dev_crit(hdev->dev,
				"kernel ctx was alive during hard reset, something is terribly wrong\n");
			rc = -EBUSY;
			goto out_err;
		}

		rc = hl_mmu_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to initialize MMU S/W after hard reset\n");
			goto out_err;
		}

		/* Allocate the kernel context */
		hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
						GFP_KERNEL);
		if (!hdev->kernel_ctx) {
			rc = -ENOMEM;
			hl_mmu_fini(hdev);
			goto out_err;
		}

		hdev->is_compute_ctx_active = false;

		rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
		if (rc) {
			dev_err(hdev->dev,
				"failed to init kernel ctx in hard reset\n");
			kfree(hdev->kernel_ctx);
			hdev->kernel_ctx = NULL;
			hl_mmu_fini(hdev);
			goto out_err;
		}
	}

	/* Device is now enabled as part of the initialization requires
	 * communication with the device firmware to get information that
	 * is required for the initialization itself
	 */
	hdev->disabled = false;

	rc = hdev->asic_funcs->hw_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
		goto out_err;
	}

	/* If device is not idle fail the reset process */
	if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
			HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
		dev_err(hdev->dev, "device is not idle (mask 0x%llx_%llx) after reset\n",
			idle_mask[1], idle_mask[0]);
		rc = -EIO;
		goto out_err;
	}

	/* Check that the communication with the device is working */
	rc = hdev->asic_funcs->test_queues(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");
		goto out_err;
	}

	if (hard_reset) {
		rc = device_late_init(hdev);
		if (rc) {
			dev_err(hdev->dev, "Failed late init after hard reset\n");
			goto out_err;
		}

		rc = hl_vm_init(hdev);
		if (rc) {
			dev_err(hdev->dev, "Failed to init memory module after hard reset\n");
			goto out_err;
		}

		hl_fw_set_max_power(hdev);
	} else {
		rc = hdev->asic_funcs->non_hard_reset_late_init(hdev);
		if (rc) {
			if (reset_upon_device_release)
				dev_err(hdev->dev,
					"Failed late init in reset after device release\n");
			else
				dev_err(hdev->dev, "Failed late init after soft reset\n");
			goto out_err;
		}
	}

	spin_lock(&hdev->reset_info.lock);
	hdev->reset_info.is_in_soft_reset = false;

	/* Schedule hard reset only if requested and if not already in hard
	 * reset. We keep 'in_reset' enabled, so no other reset can go in
	 * during the hard reset phase.
	 */
	if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)
		schedule_hard_reset = true;
	else
		hdev->reset_info.in_reset = 0;

	spin_unlock(&hdev->reset_info.lock);

	hdev->reset_info.needs_reset = false;

	dev_notice(hdev->dev, "Successfully finished resetting the device\n");

	if (hard_reset) {
		hdev->reset_info.hard_reset_cnt++;

		/* After reset is done, we are ready to receive events from
		 * the F/W. We can't do it before because we will ignore events
		 * and if those events are fatal, we won't know about it and
		 * the device will be operational although it shouldn't be
		 */
		hdev->asic_funcs->enable_events_from_fw(hdev);
	} else if (!reset_upon_device_release) {
		hdev->reset_info.soft_reset_cnt++;
	}

	if (schedule_hard_reset) {
		dev_info(hdev->dev, "Performing hard reset scheduled during soft reset\n");
		flags = hdev->reset_info.hard_reset_schedule_flags;
		hdev->reset_info.hard_reset_schedule_flags = 0;
		hdev->disabled = true;
		hard_reset = true;
		handle_reset_trigger(hdev, flags);
		goto again;
	}

	return 0;

out_err:
	hdev->disabled = true;
	hdev->reset_info.is_in_soft_reset = false;

	if (hard_reset) {
		dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
		hdev->reset_info.hard_reset_cnt++;
	} else if (reset_upon_device_release) {
		dev_err(hdev->dev, "Failed to reset device after user release\n");
		flags |= HL_DRV_RESET_HARD;
		flags &= ~HL_DRV_RESET_DEV_RELEASE;
		hard_reset = true;
		goto again;
	} else {
		dev_err(hdev->dev, "Failed to do soft-reset\n");
		hdev->reset_info.soft_reset_cnt++;
		flags |= HL_DRV_RESET_HARD;
		hard_reset = true;
		goto again;
	}

	hdev->reset_info.in_reset = 0;

	return rc;
}

/*
 * hl_device_init - main initialization function for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 * @hclass: pointer to the class object of the device
 *
 * Allocate an id for the device, do early initialization and then call the
 * ASIC specific initialization functions. Finally, create the cdev and the
 * Linux device to expose it to the user.
 */
int hl_device_init(struct hl_device *hdev, struct class *hclass)
{
	int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
	char *name;
	bool add_cdev_sysfs_on_err = false;

	name = kasprintf(GFP_KERNEL, "hl%d", hdev->id / 2);
	if (!name) {
		rc = -ENOMEM;
		goto out_disabled;
	}

	/* Initialize cdev and device structures */
	rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
				&hdev->cdev, &hdev->dev);

	kfree(name);

	if (rc)
		goto out_disabled;

	name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->id / 2);
	if (!name) {
		rc = -ENOMEM;
		goto free_dev;
	}

	/* Initialize cdev and device structures for control device */
	rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
				name, &hdev->cdev_ctrl, &hdev->dev_ctrl);

	kfree(name);

	if (rc)
		goto free_dev;

	/* Initialize ASIC function pointers and perform early init */
	rc = device_early_init(hdev);
	if (rc)
		goto free_dev_ctrl;

	user_interrupt_cnt = hdev->asic_prop.user_interrupt_count;

	if (user_interrupt_cnt) {
		hdev->user_interrupt = kcalloc(user_interrupt_cnt,
				sizeof(*hdev->user_interrupt),
				GFP_KERNEL);

		if (!hdev->user_interrupt) {
			rc = -ENOMEM;
			goto early_fini;
		}
	}

	/*
	 * Start calling ASIC initialization. First S/W then H/W and finally
	 * late init
	 */
	rc = hdev->asic_funcs->sw_init(hdev);
	if (rc)
		goto user_interrupts_fini;

	/* initialize completion structure for multi CS wait */
	hl_multi_cs_completion_init(hdev);

	/*
	 * Initialize the H/W queues. Must be done before hw_init, because
	 * there the addresses of the kernel queues are being written to the
	 * registers of the device
	 */
	rc = hl_hw_queues_create(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize kernel queues\n");
		goto sw_fini;
	}

	cq_cnt = hdev->asic_prop.completion_queues_count;

	/*
	 * Initialize the completion queues. Must be done before hw_init,
	 * because there the addresses of the completion queues are being
	 * passed as arguments to request_irq
	 */
	if (cq_cnt) {
		hdev->completion_queue = kcalloc(cq_cnt,
				sizeof(*hdev->completion_queue),
				GFP_KERNEL);

		if (!hdev->completion_queue) {
			dev_err(hdev->dev,
				"failed to allocate completion queues\n");
			rc = -ENOMEM;
			goto hw_queues_destroy;
		}
	}

	for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
		rc = hl_cq_init(hdev, &hdev->completion_queue[i],
				hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
		if (rc) {
			dev_err(hdev->dev,
				"failed to initialize completion queue\n");
			goto cq_fini;
		}
		hdev->completion_queue[i].cq_idx = i;
	}

	/*
	 * Initialize the event queue. Must be done before hw_init,
	 * because there the address of the event queue is being
	 * passed as argument to request_irq
	 */
	rc = hl_eq_init(hdev, &hdev->event_queue);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize event queue\n");
		goto cq_fini;
	}

	/* MMU S/W must be initialized before kernel context is created */
	rc = hl_mmu_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
		goto eq_fini;
	}

	/* Allocate the kernel context */
	hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
	if (!hdev->kernel_ctx) {
		rc = -ENOMEM;
		goto mmu_fini;
	}

	hdev->is_compute_ctx_active = false;

	hdev->asic_funcs->state_dump_init(hdev);

	hl_debugfs_add_device(hdev);

	/* debugfs nodes are created in hl_ctx_init so it must be called after
	 * hl_debugfs_add_device.
	 */
	rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize kernel context\n");
		kfree(hdev->kernel_ctx);
		goto remove_device_from_debugfs;
	}

	rc = hl_cb_pool_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize CB pool\n");
		goto release_ctx;
	}

	/*
	 * From this point, override rc (=0) in case of an error to allow
	 * debugging (by adding char devices and creating sysfs nodes as part
	 * of the error flow)
	 */
	add_cdev_sysfs_on_err = true;

	/* Device is now enabled as part of the initialization requires
	 * communication with the device firmware to get information that
	 * is required for the initialization itself
	 */
	hdev->disabled = false;

	rc = hdev->asic_funcs->hw_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize the H/W\n");
		rc = 0;
		goto out_disabled;
	}

	/* Check that the communication with the device is working */
	rc = hdev->asic_funcs->test_queues(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to detect if device is alive\n");
		rc = 0;
		goto out_disabled;
	}

	rc = device_late_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed late initialization\n");
		rc = 0;
		goto out_disabled;
	}

	dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
		hdev->asic_name,
		hdev->asic_prop.dram_size / SZ_1G);

	rc = hl_vm_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize memory module\n");
		rc = 0;
		goto out_disabled;
	}

	/*
	 * Expose devices and sysfs nodes to user.
	 * From here there is no need to add char devices and create sysfs
	 * nodes in case of an error.
	 */
	add_cdev_sysfs_on_err = false;
	rc = device_cdev_sysfs_add(hdev);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add char devices and sysfs nodes\n");
		rc = 0;
		goto out_disabled;
	}

	/* Need to call this again because the max power might change,
	 * depending on card type for certain ASICs
	 */
	if (hdev->asic_prop.set_max_power_on_device_init)
		hl_fw_set_max_power(hdev);

	/*
	 * hl_hwmon_init() must be called after device_late_init(), because
	 * only there we get the information from the device about which
	 * hwmon-related sensors the device supports.
	 * Furthermore, it must be done after adding the device to the system.
	 */
	rc = hl_hwmon_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize hwmon\n");
		rc = 0;
		goto out_disabled;
	}

	dev_notice(hdev->dev,
		"Successfully added device to habanalabs driver\n");

	hdev->init_done = true;

	/* After initialization is done, we are ready to receive events from
	 * the F/W. We can't do it before because we will ignore events and if
	 * those events are fatal, we won't know about it and the device will
	 * be operational although it shouldn't be
	 */
	hdev->asic_funcs->enable_events_from_fw(hdev);

	return 0;

release_ctx:
	if (hl_ctx_put(hdev->kernel_ctx) != 1)
		dev_err(hdev->dev,
			"kernel ctx is still alive on initialization failure\n");
remove_device_from_debugfs:
	hl_debugfs_remove_device(hdev);
mmu_fini:
	hl_mmu_fini(hdev);
eq_fini:
	hl_eq_fini(hdev, &hdev->event_queue);
cq_fini:
	for (i = 0 ; i < cq_ready_cnt ; i++)
		hl_cq_fini(hdev, &hdev->completion_queue[i]);
	kfree(hdev->completion_queue);
hw_queues_destroy:
	hl_hw_queues_destroy(hdev);
sw_fini:
	hdev->asic_funcs->sw_fini(hdev);
user_interrupts_fini:
	kfree(hdev->user_interrupt);
early_fini:
	device_early_fini(hdev);
free_dev_ctrl:
	put_device(hdev->dev_ctrl);
free_dev:
	put_device(hdev->dev);
out_disabled:
	hdev->disabled = true;
	if (add_cdev_sysfs_on_err)
		device_cdev_sysfs_add(hdev);
	if (hdev->pdev)
		dev_err(&hdev->pdev->dev,
			"Failed to initialize hl%d. Device is NOT usable!\n",
			hdev->id / 2);
	else
		pr_err("Failed to initialize hl%d. Device is NOT usable!\n",
			hdev->id / 2);

	return rc;
}

/*
 * hl_device_fini - main tear-down function for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Destroy the device, call ASIC fini functions and release the id
 */
void hl_device_fini(struct hl_device *hdev)
{
	bool device_in_reset;
	ktime_t timeout;
	u64 reset_sec;
	int i, rc;

	dev_info(hdev->dev, "Removing device\n");

	hdev->device_fini_pending = 1;
	flush_delayed_work(&hdev->device_reset_work.reset_work);

	if (hdev->pldm)
		reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;
	else
		reset_sec = HL_HARD_RESET_MAX_TIMEOUT;

	/*
	 * This function is competing with the reset function, so try to
	 * take the reset atomic and if we are already in the middle of reset,
	 * wait until the reset function is finished. The reset function is
	 * designed to always finish. However, in Gaudi, because of all the
	 * network ports, the hard reset could take between 10-30 seconds.
	 */
	timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);

	spin_lock(&hdev->reset_info.lock);
	device_in_reset = !!hdev->reset_info.in_reset;
	if (!device_in_reset)
		hdev->reset_info.in_reset = 1;
	spin_unlock(&hdev->reset_info.lock);

	while (device_in_reset) {
		usleep_range(50, 200);

		spin_lock(&hdev->reset_info.lock);
		device_in_reset = !!hdev->reset_info.in_reset;
		if (!device_in_reset)
			hdev->reset_info.in_reset = 1;
		spin_unlock(&hdev->reset_info.lock);

		if (ktime_compare(ktime_get(), timeout) > 0) {
			dev_crit(hdev->dev,
				"Failed to remove device because reset function did not finish\n");
			return;
		}
	}

	/* Disable PCI access from device F/W so it won't send us additional
	 * interrupts. We disable MSI/MSI-X at the halt_engines function and
	 * we can't have the F/W sending us interrupts after that. We need to
	 * disable the access here because if the device is marked disabled,
	 * the message won't be sent. Also, in case of heartbeat, the device
	 * CPU is marked as disabled so this message won't be sent.
	 */
	hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);

	/* Mark device as disabled */
	hdev->disabled = true;

	take_release_locks(hdev);

	hdev->reset_info.hard_reset_pending = true;

	hl_hwmon_fini(hdev);

	cleanup_resources(hdev, true, false, false);

	/* Kill processes here after CS rollback. This is because the process
	 * can't really exit until all its CSs are done, which is what we do
	 * in cs rollback
	 */
	dev_info(hdev->dev,
		"Waiting for all processes to exit (timeout of %u seconds)",
		HL_PENDING_RESET_LONG_SEC);

	rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC, false);
	if (rc) {
		dev_crit(hdev->dev, "Failed to kill all open processes\n");
		device_disable_open_processes(hdev, false);
	}

	rc = device_kill_open_processes(hdev, 0, true);
	if (rc) {
		dev_crit(hdev->dev, "Failed to kill all control device open processes\n");
		device_disable_open_processes(hdev, true);
	}

	hl_cb_pool_fini(hdev);

	/* Reset the H/W. It will be in idle state after this returns */
	hdev->asic_funcs->hw_fini(hdev, true, false);

	hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;

	/* Release kernel context */
	if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
		dev_err(hdev->dev, "kernel ctx is still alive\n");

	hl_debugfs_remove_device(hdev);

	hl_vm_fini(hdev);

	hl_mmu_fini(hdev);

	hl_eq_fini(hdev, &hdev->event_queue);

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		hl_cq_fini(hdev, &hdev->completion_queue[i]);
	kfree(hdev->completion_queue);
	kfree(hdev->user_interrupt);

	hl_hw_queues_destroy(hdev);

	/* Call ASIC S/W finalize function */
	hdev->asic_funcs->sw_fini(hdev);

	device_early_fini(hdev);

	/* Hide devices and sysfs nodes from user */
	device_cdev_sysfs_del(hdev);

	pr_info("removed device successfully\n");
}

/*
 * hl_rreg - Read an MMIO register
 *
 * @hdev: pointer to habanalabs device structure
 * @reg: MMIO register offset (in bytes)
 *
 * Returns the value of the MMIO register we are asked to read.
 */
inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
{
	return readl(hdev->rmmio + reg);
}

/*
 * hl_wreg - Write to an MMIO register
 *
 * @hdev: pointer to habanalabs device structure
 * @reg: MMIO register offset (in bytes)
 * @val: 32-bit value
 *
 * Writes the 32-bit value into the MMIO register.
 */
inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
{
	writel(val, hdev->rmmio + reg);
}