linux/drivers/misc/habanalabs/common/device.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt)                     "habanalabs: " fmt

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/pci.h>
#include <linux/hwmon.h>

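/*
 * hl_device_status - get the current status of the device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Derive the device status from the reset, needs_reset, disabled and
 * init_done flags, in that order of precedence.
 */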
enum hl_device_status hl_device_status(struct hl_device *hdev)
{
        enum hl_device_status status;

        if (atomic_read(&hdev->in_reset))
                status = HL_DEVICE_STATUS_IN_RESET;
        else if (hdev->needs_reset)
                status = HL_DEVICE_STATUS_NEEDS_RESET;
        else if (hdev->disabled)
                status = HL_DEVICE_STATUS_MALFUNCTION;
        else if (!hdev->init_done)
                status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
        else
                status = HL_DEVICE_STATUS_OPERATIONAL;

        return status;
}

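/*
 * hl_device_operational - check whether the device can accept new work
 *
 * @hdev: pointer to habanalabs device structure
 * @status: optional pointer that, if not NULL, receives the current status
 *
 * Returns true if the device is operational or still in creation, false if
 * it is in reset, malfunctioning or in need of a reset.
 */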
bool hl_device_operational(struct hl_device *hdev,
                enum hl_device_status *status)
{
        enum hl_device_status current_status;

        current_status = hl_device_status(hdev);
        if (status)
                *status = current_status;

        switch (current_status) {
        case HL_DEVICE_STATUS_IN_RESET:
        case HL_DEVICE_STATUS_MALFUNCTION:
        case HL_DEVICE_STATUS_NEEDS_RESET:
                return false;
        case HL_DEVICE_STATUS_OPERATIONAL:
        case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
        default:
                return true;
        }
}

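/*
 * hpriv_release - release function for the hl_fpriv refcount
 *
 * @ref: pointer to the embedded kref of the hl_fpriv object
 *
 * Called when the last reference to a file private structure is dropped.
 * Tears down the per-process state and, depending on the device
 * configuration and idle state, may trigger a device reset.
 */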
static void hpriv_release(struct kref *ref)
{
        u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
        bool device_is_idle = true;
        struct hl_fpriv *hpriv;
        struct hl_device *hdev;

        hpriv = container_of(ref, struct hl_fpriv, refcount);

        hdev = hpriv->hdev;

        put_pid(hpriv->taskpid);

        hl_debugfs_remove_file(hpriv);

        mutex_destroy(&hpriv->restore_phase_mutex);

        mutex_lock(&hdev->fpriv_list_lock);
        list_del(&hpriv->dev_node);
        hdev->compute_ctx = NULL;
        mutex_unlock(&hdev->fpriv_list_lock);

        kfree(hpriv);

        if ((!hdev->pldm) && (hdev->pdev) &&
                        (!hdev->asic_funcs->is_device_idle(hdev,
                                idle_mask,
                                HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL))) {
                dev_err(hdev->dev,
                        "device not idle after user context is closed (0x%llx_%llx)\n",
                        idle_mask[1], idle_mask[0]);

                device_is_idle = false;
        }

        if ((hdev->reset_if_device_not_idle && !device_is_idle)
                        || hdev->reset_upon_device_release)
                hl_device_reset(hdev, HL_RESET_DEVICE_RELEASE);
}

void hl_hpriv_get(struct hl_fpriv *hpriv)
{
        kref_get(&hpriv->refcount);
}

int hl_hpriv_put(struct hl_fpriv *hpriv)
{
        return kref_put(&hpriv->refcount, hpriv_release);
}

/*
 * hl_device_release - release function for habanalabs device
 *
 * @inode: pointer to inode structure
 * @filp: pointer to file structure
 *
 * Called when a process closes a habanalabs device
 */
static int hl_device_release(struct inode *inode, struct file *filp)
{
        struct hl_fpriv *hpriv = filp->private_data;
        struct hl_device *hdev = hpriv->hdev;

        filp->private_data = NULL;

        if (!hdev) {
                pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
                put_pid(hpriv->taskpid);
                return 0;
        }

        /* Each pending user interrupt holds the user's context, hence we
         * must release them all before calling hl_ctx_mgr_fini().
         */
        hl_release_pending_user_interrupts(hpriv->hdev);

        hl_cb_mgr_fini(hdev, &hpriv->cb_mgr);
        hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);

        if (!hl_hpriv_put(hpriv))
                dev_notice(hdev->dev,
                        "User process closed FD but device still in use\n");

        hdev->last_open_session_duration_jif =
                jiffies - hdev->last_successful_open_jif;

        return 0;
}

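/*
 * hl_device_release_ctrl - release function for habanalabs control device
 *
 * @inode: pointer to inode structure
 * @filp: pointer to file structure
 *
 * Called when a process closes a habanalabs control device
 */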
static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
{
        struct hl_fpriv *hpriv = filp->private_data;
        struct hl_device *hdev = hpriv->hdev;

        filp->private_data = NULL;

        if (!hdev) {
                pr_err("Closing FD after device was removed\n");
                goto out;
        }

        mutex_lock(&hdev->fpriv_list_lock);
        list_del(&hpriv->dev_node);
        mutex_unlock(&hdev->fpriv_list_lock);
out:
        put_pid(hpriv->taskpid);

        kfree(hpriv);

        return 0;
}

/*
 * hl_mmap - mmap function for habanalabs device
 *
 * @filp: pointer to file structure
 * @vma: pointer to vm_area_struct of the process
 *
 * Called when a process does an mmap on a habanalabs device. Dispatches to
 * the relevant mmap implementation according to the mmap type encoded in
 * the offset.
 */
static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct hl_fpriv *hpriv = filp->private_data;
        struct hl_device *hdev = hpriv->hdev;
        unsigned long vm_pgoff;

        if (!hdev) {
                pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");
                return -ENODEV;
        }

        vm_pgoff = vma->vm_pgoff;
        vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);

        switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
        case HL_MMAP_TYPE_CB:
                return hl_cb_mmap(hpriv, vma);

        case HL_MMAP_TYPE_BLOCK:
                return hl_hw_block_mmap(hpriv, vma);
        }

        return -EINVAL;
}

static const struct file_operations hl_ops = {
        .owner = THIS_MODULE,
        .open = hl_device_open,
        .release = hl_device_release,
        .mmap = hl_mmap,
        .unlocked_ioctl = hl_ioctl,
        .compat_ioctl = hl_ioctl
};

static const struct file_operations hl_ctrl_ops = {
        .owner = THIS_MODULE,
        .open = hl_device_open_ctrl,
        .release = hl_device_release_ctrl,
        .unlocked_ioctl = hl_ioctl_control,
        .compat_ioctl = hl_ioctl_control
};

static void device_release_func(struct device *dev)
{
        kfree(dev);
}

/*
 * device_init_cdev - Initialize cdev and device for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 * @hclass: pointer to the class object of the device
 * @minor: minor number of the specific device
 * @fops: file operations to install for this device
 * @name: name of the device as it will appear in the filesystem
 * @cdev: pointer to the char device object that will be initialized
 * @dev: pointer to the device object that will be initialized
 *
 * Initialize a cdev and a Linux device for the habanalabs device.
 */
static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
                                int minor, const struct file_operations *fops,
                                char *name, struct cdev *cdev,
                                struct device **dev)
{
        cdev_init(cdev, fops);
        cdev->owner = THIS_MODULE;

        *dev = kzalloc(sizeof(**dev), GFP_KERNEL);
        if (!*dev)
                return -ENOMEM;

        device_initialize(*dev);
        (*dev)->devt = MKDEV(hdev->major, minor);
        (*dev)->class = hclass;
        (*dev)->release = device_release_func;
        dev_set_drvdata(*dev, hdev);
        dev_set_name(*dev, "%s", name);

        return 0;
}

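/*
 * device_cdev_sysfs_add - add the cdevs and sysfs nodes to the system
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Register the compute and control char devices and create the sysfs nodes.
 * On failure, any device that was already added is removed again.
 */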
static int device_cdev_sysfs_add(struct hl_device *hdev)
{
        int rc;

        rc = cdev_device_add(&hdev->cdev, hdev->dev);
        if (rc) {
                dev_err(hdev->dev,
                        "failed to add a char device to the system\n");
                return rc;
        }

        rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
        if (rc) {
                dev_err(hdev->dev,
                        "failed to add a control char device to the system\n");
                goto delete_cdev_device;
        }

        /* hl_sysfs_init() must be done after adding the device to the system */
        rc = hl_sysfs_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize sysfs\n");
                goto delete_ctrl_cdev_device;
        }

        hdev->cdev_sysfs_created = true;

        return 0;

delete_ctrl_cdev_device:
        cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
delete_cdev_device:
        cdev_device_del(&hdev->cdev, hdev->dev);
        return rc;
}

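/*
 * device_cdev_sysfs_del - remove the cdevs and sysfs nodes from the system
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Undo device_cdev_sysfs_add() and drop the references that were taken on
 * the device objects at creation time.
 */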
static void device_cdev_sysfs_del(struct hl_device *hdev)
{
        if (!hdev->cdev_sysfs_created)
                goto put_devices;

        hl_sysfs_fini(hdev);
        cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
        cdev_device_del(&hdev->cdev, hdev->dev);

put_devices:
        put_device(hdev->dev);
        put_device(hdev->dev_ctrl);
}

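/*
 * device_hard_reset_pending - work function that performs a pending hard reset
 *
 * @work: embedded delayed work item of the hl_device_reset_work structure
 *
 * Runs from the dedicated reset workqueue. If the reset returns -EBUSY
 * (open processes have not exited yet) and the device is not being removed,
 * the work re-queues itself and retries later.
 */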
static void device_hard_reset_pending(struct work_struct *work)
{
        struct hl_device_reset_work *device_reset_work =
                container_of(work, struct hl_device_reset_work,
                                reset_work.work);
        struct hl_device *hdev = device_reset_work->hdev;
        u32 flags;
        int rc;

        flags = HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD;

        if (device_reset_work->fw_reset)
                flags |= HL_RESET_FW;

        rc = hl_device_reset(hdev, flags);
        if ((rc == -EBUSY) && !hdev->device_fini_pending) {
                dev_info(hdev->dev,
                        "Could not reset device. will try again in %u seconds",
                        HL_PENDING_RESET_PER_SEC);

                queue_delayed_work(device_reset_work->wq,
                        &device_reset_work->reset_work,
                        msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
        }
}

/*
 * device_early_init - do some early initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Install the relevant function pointers and call the early_init function,
 * if such a function exists
 */
static int device_early_init(struct hl_device *hdev)
{
        int i, rc;
        char workq_name[32];

        switch (hdev->asic_type) {
        case ASIC_GOYA:
                goya_set_asic_funcs(hdev);
                strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
                break;
        case ASIC_GAUDI:
                gaudi_set_asic_funcs(hdev);
                strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
                break;
        case ASIC_GAUDI_SEC:
                gaudi_set_asic_funcs(hdev);
                strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
                break;
        default:
                dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
                        hdev->asic_type);
                return -EINVAL;
        }

        rc = hdev->asic_funcs->early_init(hdev);
        if (rc)
                return rc;

        rc = hl_asid_init(hdev);
        if (rc)
                goto early_fini;

        if (hdev->asic_prop.completion_queues_count) {
                hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
                                sizeof(*hdev->cq_wq),
                                GFP_KERNEL);
                if (!hdev->cq_wq) {
                        rc = -ENOMEM;
                        goto asid_fini;
                }
        }

        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
                snprintf(workq_name, sizeof(workq_name), "hl-free-jobs-%u", (u32) i);
                hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
                if (hdev->cq_wq[i] == NULL) {
                        dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
                        rc = -ENOMEM;
                        goto free_cq_wq;
                }
        }

        hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
        if (hdev->eq_wq == NULL) {
                dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
                rc = -ENOMEM;
                goto free_cq_wq;
        }

        hdev->sob_reset_wq = alloc_workqueue("hl-sob-reset", WQ_UNBOUND, 0);
        if (!hdev->sob_reset_wq) {
                dev_err(hdev->dev,
                        "Failed to allocate SOB reset workqueue\n");
                rc = -ENOMEM;
                goto free_eq_wq;
        }

        hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
                                        GFP_KERNEL);
        if (!hdev->hl_chip_info) {
                rc = -ENOMEM;
                goto free_sob_reset_wq;
        }

        rc = hl_mmu_if_set_funcs(hdev);
        if (rc)
                goto free_chip_info;

        hl_cb_mgr_init(&hdev->kernel_cb_mgr);

        hdev->device_reset_work.wq =
                        create_singlethread_workqueue("hl_device_reset");
        if (!hdev->device_reset_work.wq) {
                rc = -ENOMEM;
                dev_err(hdev->dev, "Failed to create device reset WQ\n");
                goto free_cb_mgr;
        }

        INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work,
                        device_hard_reset_pending);
        hdev->device_reset_work.hdev = hdev;
        hdev->device_fini_pending = 0;

        mutex_init(&hdev->send_cpu_message_lock);
        mutex_init(&hdev->debug_lock);
        INIT_LIST_HEAD(&hdev->cs_mirror_list);
        spin_lock_init(&hdev->cs_mirror_lock);
        INIT_LIST_HEAD(&hdev->fpriv_list);
        mutex_init(&hdev->fpriv_list_lock);
        atomic_set(&hdev->in_reset, 0);

        return 0;

free_cb_mgr:
        hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
free_chip_info:
        kfree(hdev->hl_chip_info);
free_sob_reset_wq:
        destroy_workqueue(hdev->sob_reset_wq);
free_eq_wq:
        destroy_workqueue(hdev->eq_wq);
free_cq_wq:
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                if (hdev->cq_wq[i])
                        destroy_workqueue(hdev->cq_wq[i]);
        kfree(hdev->cq_wq);
asid_fini:
        hl_asid_fini(hdev);
early_fini:
        if (hdev->asic_funcs->early_fini)
                hdev->asic_funcs->early_fini(hdev);

        return rc;
}

/*
 * device_early_fini - finalize all that was done in device_early_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 */
static void device_early_fini(struct hl_device *hdev)
{
        int i;

        mutex_destroy(&hdev->debug_lock);
        mutex_destroy(&hdev->send_cpu_message_lock);

        mutex_destroy(&hdev->fpriv_list_lock);

        hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);

        kfree(hdev->hl_chip_info);

        destroy_workqueue(hdev->sob_reset_wq);
        destroy_workqueue(hdev->eq_wq);
        destroy_workqueue(hdev->device_reset_work.wq);

        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                destroy_workqueue(hdev->cq_wq[i]);
        kfree(hdev->cq_wq);

        hl_asid_fini(hdev);

        if (hdev->asic_funcs->early_fini)
                hdev->asic_funcs->early_fini(hdev);
}

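/*
 * set_freq_to_low_job - periodic work that lowers the device frequency
 *
 * @work: embedded delayed work item in the habanalabs device structure
 *
 * If no compute context is open, drop the PLL profile to low. The work
 * re-schedules itself unconditionally.
 */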
static void set_freq_to_low_job(struct work_struct *work)
{
        struct hl_device *hdev = container_of(work, struct hl_device,
                                                work_freq.work);

        mutex_lock(&hdev->fpriv_list_lock);

        if (!hdev->compute_ctx)
                hl_device_set_frequency(hdev, PLL_LOW);

        mutex_unlock(&hdev->fpriv_list_lock);

        schedule_delayed_work(&hdev->work_freq,
                        usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
}

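/*
 * hl_device_heartbeat - periodic work that checks the device is alive
 *
 * @work: embedded delayed work item in the habanalabs device structure
 *
 * Send a heartbeat packet to the device CPU. On failure, trigger a hard
 * reset; otherwise re-schedule the next heartbeat.
 */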
static void hl_device_heartbeat(struct work_struct *work)
{
        struct hl_device *hdev = container_of(work, struct hl_device,
                                                work_heartbeat.work);

        if (!hl_device_operational(hdev, NULL))
                goto reschedule;

        if (!hdev->asic_funcs->send_heartbeat(hdev))
                goto reschedule;

        dev_err(hdev->dev, "Device heartbeat failed!\n");
        hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_HEARTBEAT);

        return;

reschedule:
        schedule_delayed_work(&hdev->work_heartbeat,
                        usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}

/*
 * device_late_init - do late initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Perform initialization that either needs the device H/W queues to be active
 * or needs to happen after all the rest of the initialization is finished
 */
static int device_late_init(struct hl_device *hdev)
{
        int rc;

        if (hdev->asic_funcs->late_init) {
                rc = hdev->asic_funcs->late_init(hdev);
                if (rc) {
                        dev_err(hdev->dev,
                                "failed late initialization for the H/W\n");
                        return rc;
                }
        }

        hdev->high_pll = hdev->asic_prop.high_pll;

        /* force setting to low frequency */
        hdev->curr_pll_profile = PLL_LOW;

        if (hdev->pm_mng_profile == PM_AUTO)
                hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
        else
                hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);

        INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
        schedule_delayed_work(&hdev->work_freq,
                        usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));

        if (hdev->heartbeat) {
                INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
                schedule_delayed_work(&hdev->work_heartbeat,
                                usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
        }

        hdev->late_init_done = true;

        return 0;
}

/*
 * device_late_fini - finalize all that was done in device_late_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 */
static void device_late_fini(struct hl_device *hdev)
{
        if (!hdev->late_init_done)
                return;

        cancel_delayed_work_sync(&hdev->work_freq);
        if (hdev->heartbeat)
                cancel_delayed_work_sync(&hdev->work_heartbeat);

        if (hdev->asic_funcs->late_fini)
                hdev->asic_funcs->late_fini(hdev);

        hdev->late_init_done = false;
}

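/*
 * hl_device_utilization - get the current utilization of the device
 *
 * @hdev: pointer to habanalabs device structure
 * @utilization: pointer to the result, as a percentage of the power range
 *
 * Read the current power consumption from the firmware and express it as
 * a percentage between the DC (idle) power and the maximum power.
 */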
int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
{
        u64 max_power, curr_power, dc_power, dividend;
        int rc;

        max_power = hdev->asic_prop.max_power_default;
        dc_power = hdev->asic_prop.dc_power_default;
        rc = hl_fw_cpucp_power_get(hdev, &curr_power);

        if (rc)
                return rc;

        curr_power = clamp(curr_power, dc_power, max_power);

        dividend = (curr_power - dc_power) * 100;
        *utilization = (u32) div_u64(dividend, (max_power - dc_power));

        return 0;
}

/*
 * hl_device_set_frequency - set the frequency of the device
 *
 * @hdev: pointer to habanalabs device structure
 * @freq: the new frequency value
 *
 * Change the frequency if needed. This function has no protection against
 * concurrency, therefore it is assumed that the calling function has protected
 * itself against the case of calling this function from multiple threads with
 * different values
 *
 * Returns 0 if no change was done, otherwise returns 1
 */
int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
{
        if ((hdev->pm_mng_profile == PM_MANUAL) ||
                        (hdev->curr_pll_profile == freq))
                return 0;

        dev_dbg(hdev->dev, "Changing device frequency to %s\n",
                freq == PLL_HIGH ? "high" : "low");

        hdev->asic_funcs->set_pll_profile(hdev, freq);

        hdev->curr_pll_profile = freq;

        return 1;
}

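/*
 * hl_device_set_debug_mode - enter or leave debug mode
 *
 * @hdev: pointer to habanalabs device structure
 * @enable: true to enter debug mode, false to leave it
 *
 * Debug mode and clock gating are mutually exclusive, so clock gating is
 * disabled on entry and restored on exit (unless a hard reset is pending).
 * Returns -EFAULT if the requested transition does not match the current
 * state.
 */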
int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
{
        int rc = 0;

        mutex_lock(&hdev->debug_lock);

        if (!enable) {
                if (!hdev->in_debug) {
                        dev_err(hdev->dev,
                                "Failed to disable debug mode because device was not in debug mode\n");
                        rc = -EFAULT;
                        goto out;
                }

                if (!hdev->hard_reset_pending)
                        hdev->asic_funcs->halt_coresight(hdev);

                hdev->in_debug = 0;

                if (!hdev->hard_reset_pending)
                        hdev->asic_funcs->set_clock_gating(hdev);

                goto out;
        }

        if (hdev->in_debug) {
                dev_err(hdev->dev,
                        "Failed to enable debug mode because device is already in debug mode\n");
                rc = -EFAULT;
                goto out;
        }

        hdev->asic_funcs->disable_clock_gating(hdev);
        hdev->in_debug = 1;

out:
        mutex_unlock(&hdev->debug_lock);

        return rc;
}

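/*
 * take_release_locks - flush threads that hold the main device locks
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Take and immediately release each lock, which guarantees that any thread
 * that was inside the corresponding critical section has finished.
 */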
static void take_release_locks(struct hl_device *hdev)
{
        /* Flush anyone that is inside the critical section of enqueuing
         * jobs to the H/W
         */
        hdev->asic_funcs->hw_queues_lock(hdev);
        hdev->asic_funcs->hw_queues_unlock(hdev);

        /* Flush processes that are sending messages to the CPU */
        mutex_lock(&hdev->send_cpu_message_lock);
        mutex_unlock(&hdev->send_cpu_message_lock);

        /* Flush anyone that is inside device open */
        mutex_lock(&hdev->fpriv_list_lock);
        mutex_unlock(&hdev->fpriv_list_lock);
}

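/*
 * cleanup_resources - tear down the active state of the device
 *
 * @hdev: pointer to habanalabs device structure
 * @hard_reset: true if this is part of a hard reset or device removal
 * @fw_reset: true if the reset is performed by the firmware
 *
 * Halt the engines, roll back all command submissions and release pending
 * user interrupts.
 */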
static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
        if (hard_reset)
                device_late_fini(hdev);

        /*
         * Halt the engines and disable interrupts so we won't get any more
         * completions from H/W and we won't have any accesses from the
         * H/W to the host machine
         */
        hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);

        /* Go over all the queues, release all CS and their jobs */
        hl_cs_rollback_all(hdev);

        /* Release all pending user interrupts, each pending user interrupt
         * holds a reference to user context
         */
        hl_release_pending_user_interrupts(hdev);
}

/*
 * hl_device_suspend - initiate device suspend
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Puts the H/W in the suspend state (all ASICs).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int hl_device_suspend(struct hl_device *hdev)
{
        int rc;

        pci_save_state(hdev->pdev);

        /* Block future CS/VM/JOB completion operations */
        rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
        if (rc) {
                dev_err(hdev->dev, "Can't suspend while in reset\n");
                return -EIO;
        }

        /* This blocks all other stuff that is not blocked by in_reset */
        hdev->disabled = true;

        take_release_locks(hdev);

        rc = hdev->asic_funcs->suspend(hdev);
        if (rc)
                dev_err(hdev->dev,
                        "Failed to disable PCI access of device CPU\n");

        /* Shut down the device */
        pci_disable_device(hdev->pdev);
        pci_set_power_state(hdev->pdev, PCI_D3hot);

        return 0;
}

/*
 * hl_device_resume - initiate device resume
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Bring the H/W back to operating state (all ASICs).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int hl_device_resume(struct hl_device *hdev)
{
        int rc;

        pci_set_power_state(hdev->pdev, PCI_D0);
        pci_restore_state(hdev->pdev);
        rc = pci_enable_device_mem(hdev->pdev);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to enable PCI device in resume\n");
                return rc;
        }

        pci_set_master(hdev->pdev);

        rc = hdev->asic_funcs->resume(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed to resume device after suspend\n");
                goto disable_device;
        }

        hdev->disabled = false;
        atomic_set(&hdev->in_reset, 0);

        rc = hl_device_reset(hdev, HL_RESET_HARD);
        if (rc) {
                dev_err(hdev->dev, "Failed to reset device during resume\n");
                goto disable_device;
        }

        return 0;

disable_device:
        pci_clear_master(hdev->pdev);
        pci_disable_device(hdev->pdev);

        return rc;
}

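/*
 * device_kill_open_processes - force user processes off the device
 *
 * @hdev: pointer to habanalabs device structure
 * @timeout: seconds to wait for processes to exit, or 0 for the default
 *
 * Send SIGKILL to every process that still holds an open file descriptor
 * and wait for the fpriv list to drain. Returns 0 on success, -EBUSY if
 * another retry is warranted and -ETIME on a definite failure.
 */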
static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
{
        struct hl_fpriv *hpriv;
        struct task_struct *task = NULL;
        u32 pending_cnt;

        /* Give the user time to close the FD, and give processes that are
         * inside hl_device_open time to finish
         */
        if (!list_empty(&hdev->fpriv_list))
                ssleep(1);

        if (timeout) {
                pending_cnt = timeout;
        } else {
                if (hdev->process_kill_trial_cnt) {
                        /* Processes have already been killed */
                        pending_cnt = 1;
                        goto wait_for_processes;
                } else {
                        /* Wait a small period after process kill */
                        pending_cnt = HL_PENDING_RESET_PER_SEC;
                }
        }

        mutex_lock(&hdev->fpriv_list_lock);

        /* This section must be protected because we are dereferencing
         * pointers that are freed if the process exits
         */
        list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
                task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
                if (task) {
                        dev_info(hdev->dev, "Killing user process pid=%d\n",
                                task_pid_nr(task));
                        send_sig(SIGKILL, task, 1);
                        usleep_range(1000, 10000);

                        put_task_struct(task);
                } else {
                        dev_warn(hdev->dev,
                                "Can't get task struct for PID so giving up on killing process\n");
                        mutex_unlock(&hdev->fpriv_list_lock);
                        return -ETIME;
                }
        }

        mutex_unlock(&hdev->fpriv_list_lock);

        /*
         * We killed the open users, but that doesn't mean they are closed.
         * It could be that they are running a long cleanup phase in the driver
         * e.g. MMU unmappings, or running another long teardown flow even
         * before our cleanup.
         * Therefore we need to wait again to make sure they are closed before
         * continuing with the reset.
         */

wait_for_processes:
        while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) {
                dev_dbg(hdev->dev,
                        "Waiting for all unmap operations to finish before hard reset\n");

                pending_cnt--;

                ssleep(1);
        }

        /* All processes exited successfully */
        if (list_empty(&hdev->fpriv_list))
                return 0;

        /* Give up waiting for processes to exit */
        if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
                return -ETIME;

        hdev->process_kill_trial_cnt++;

        return -EBUSY;
}

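/*
 * device_disable_open_processes - detach open file descriptors from the device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Clear the hdev pointer of every remaining file private structure so that
 * subsequent file operations fail gracefully instead of touching a device
 * that is going away.
 */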
static void device_disable_open_processes(struct hl_device *hdev)
{
        struct hl_fpriv *hpriv;

        mutex_lock(&hdev->fpriv_list_lock);
        list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
                hpriv->hdev = NULL;
        mutex_unlock(&hdev->fpriv_list_lock);
}

/*
 * hl_device_reset - reset the device
 *
 * @hdev: pointer to habanalabs device structure
 * @flags: reset flags.
 *
 * Block future CS and wait for pending CS to be enqueued
 * Call ASIC H/W fini
 * Flush all completions
 * Re-initialize all internal data structures
 * Call ASIC H/W init, late_init
 * Test queues
 * Enable device
 *
 * Returns 0 for success or an error on failure.
 */
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
        u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
        bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false;
        int i, rc;

        if (!hdev->init_done) {
                dev_err(hdev->dev,
                        "Can't reset before initialization is done\n");
                return 0;
        }

        hard_reset = !!(flags & HL_RESET_HARD);
        from_hard_reset_thread = !!(flags & HL_RESET_FROM_RESET_THREAD);
        fw_reset = !!(flags & HL_RESET_FW);

        if (!hard_reset && !hdev->supports_soft_reset) {
                hard_instead_soft = true;
                hard_reset = true;
        }

        if (hdev->reset_upon_device_release &&
                        (flags & HL_RESET_DEVICE_RELEASE)) {
                dev_dbg(hdev->dev,
                        "Perform %s-reset upon device release\n",
                        hard_reset ? "hard" : "soft");
                goto do_reset;
        }

        if (!hard_reset && !hdev->allow_external_soft_reset) {
                hard_instead_soft = true;
                hard_reset = true;
        }

        if (hard_instead_soft)
                dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");

do_reset:
        /* Re-entry of reset thread */
        if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
                goto kill_processes;

        /*
         * Prevent concurrency in this function - only one reset should be
         * done at any given time. Only need to perform this if we didn't
         * get here from the dedicated hard reset thread
         */
        if (!from_hard_reset_thread) {
                /* Block future CS/VM/JOB completion operations */
                rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
                if (rc)
                        return 0;

                /*
                 * 'reset_cause' is updated here because this point is reached
                 * exactly once per reset ('in_reset' guarantees it), so
                 * 'reset_cause' will keep holding the first recorded reason.
                 */
                if (flags & HL_RESET_HEARTBEAT)
                        hdev->curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
                else if (flags & HL_RESET_TDR)
                        hdev->curr_reset_cause = HL_RESET_CAUSE_TDR;
                else
                        hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;

                /* If the reset is due to heartbeat, the device CPU is not
                 * responsive, in which case there is no point in sending it a
                 * PCI disable message.
                 *
                 * If the F/W is performing the reset, there is no need to send
                 * it a message to disable PCI access
                 */
                if (hard_reset && !(flags & (HL_RESET_HEARTBEAT | HL_RESET_FW))) {
                        /* Disable PCI access from the device F/W so it won't
                         * send us additional interrupts. We disable MSI/MSI-X
                         * at the halt_engines function and we can't have the
                         * F/W sending us interrupts after that. We need to
                         * disable the access here because if the device is
                         * marked disabled, the message won't be sent. Also, in
                         * case of heartbeat, the device CPU is marked as
                         * disabled so this message won't be sent
                         */
                        if (hl_fw_send_pci_access_msg(hdev,
                                        CPUCP_PACKET_DISABLE_PCI_ACCESS))
                                dev_warn(hdev->dev,
                                        "Failed to disable PCI access by F/W\n");
                }

                /* This also blocks future CS/VM/JOB completion operations */
                hdev->disabled = true;

                take_release_locks(hdev);

                dev_err(hdev->dev, "Going to RESET device!\n");
        }

again:
        if ((hard_reset) && (!from_hard_reset_thread)) {
                hdev->hard_reset_pending = true;

                hdev->process_kill_trial_cnt = 0;

                hdev->device_reset_work.fw_reset = fw_reset;

                /*
                 * Because the reset function can't run from the heartbeat work,
                 * we need to call the reset function from a dedicated work item.
                 */
                queue_delayed_work(hdev->device_reset_work.wq,
                        &hdev->device_reset_work.reset_work, 0);

                return 0;
        }

        cleanup_resources(hdev, hard_reset, fw_reset);

kill_processes:
        if (hard_reset) {
                /* Kill processes here after CS rollback. This is because the
                 * process can't really exit until all its CSs are done, which
                 * is what we do in cs rollback
                 */
                rc = device_kill_open_processes(hdev, 0);

                if (rc == -EBUSY) {
                        if (hdev->device_fini_pending) {
                                dev_crit(hdev->dev,
                                        "Failed to kill all open processes, stopping hard reset\n");
                                goto out_err;
                        }

                        /* signal reset thread to reschedule */
                        return rc;
                }

                if (rc) {
                        dev_crit(hdev->dev,
                                "Failed to kill all open processes, stopping hard reset\n");
                        goto out_err;
                }

                /* Flush the Event queue workers to make sure no other thread is
                 * reading or writing to registers during the reset
                 */
                flush_workqueue(hdev->eq_wq);
        }

        /* Reset the H/W. It will be in idle state after this returns */
        hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);

        if (hard_reset) {
                hdev->fw_loader.linux_loaded = false;

                /* Release kernel context */
                if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
                        hdev->kernel_ctx = NULL;

                hl_vm_fini(hdev);
                hl_mmu_fini(hdev);
                hl_eq_reset(hdev, &hdev->event_queue);
        }

        /* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
        hl_hw_queue_reset(hdev, hard_reset);
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                hl_cq_reset(hdev, &hdev->completion_queue[i]);

        mutex_lock(&hdev->fpriv_list_lock);

        /* Make sure the context switch phase will run again */
        if (hdev->compute_ctx) {
                atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1);
                hdev->compute_ctx->thread_ctx_switch_wait_token = 0;
        }

        mutex_unlock(&hdev->fpriv_list_lock);

        /* Finished tear-down, starting to re-initialize */

        if (hard_reset) {
                hdev->device_cpu_disabled = false;
                hdev->hard_reset_pending = false;

                if (hdev->kernel_ctx) {
                        dev_crit(hdev->dev,
                                "kernel ctx was alive during hard reset, something is terribly wrong\n");
                        rc = -EBUSY;
                        goto out_err;
                }

                rc = hl_mmu_init(hdev);
                if (rc) {
                        dev_err(hdev->dev,
                                "Failed to initialize MMU S/W after hard reset\n");
                        goto out_err;
                }

                /* Allocate the kernel context */
                hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
                                                GFP_KERNEL);
                if (!hdev->kernel_ctx) {
                        rc = -ENOMEM;
                        hl_mmu_fini(hdev);
                        goto out_err;
                }

                hdev->compute_ctx = NULL;

                rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
                if (rc) {
                        dev_err(hdev->dev,
                                "failed to init kernel ctx in hard reset\n");
                        kfree(hdev->kernel_ctx);
                        hdev->kernel_ctx = NULL;
                        hl_mmu_fini(hdev);
                        goto out_err;
                }
        }

        /* Device is now enabled because part of the initialization requires
         * communication with the device firmware to get information that
         * is required for the initialization itself
         */
        hdev->disabled = false;

        rc = hdev->asic_funcs->hw_init(hdev);
        if (rc) {
                dev_err(hdev->dev,
                        "failed to initialize the H/W after reset\n");
                goto out_err;
        }

        /* If device is not idle fail the reset process */
        if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
                        HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
                dev_err(hdev->dev,
                        "device is not idle (mask 0x%llx_%llx) after reset\n",
                        idle_mask[1], idle_mask[0]);
                rc = -EIO;
                goto out_err;
        }

        /* Check that the communication with the device is working */
        rc = hdev->asic_funcs->test_queues(hdev);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to detect if device is alive after reset\n");
                goto out_err;
        }

        if (hard_reset) {
                rc = device_late_init(hdev);
                if (rc) {
                        dev_err(hdev->dev,
                                "Failed late init after hard reset\n");
                        goto out_err;
                }

                rc = hl_vm_init(hdev);
                if (rc) {
                        dev_err(hdev->dev,
                                "Failed to init memory module after hard reset\n");
                        goto out_err;
                }

                hl_set_max_power(hdev);
        } else {
                rc = hdev->asic_funcs->soft_reset_late_init(hdev);
                if (rc) {
                        dev_err(hdev->dev,
                                "Failed late init after soft reset\n");
                        goto out_err;
                }
        }

        atomic_set(&hdev->in_reset, 0);
        hdev->needs_reset = false;

        dev_notice(hdev->dev, "Successfully finished resetting the device\n");

        if (hard_reset) {
                hdev->hard_reset_cnt++;

                /* After reset is done, we are ready to receive events from
                 * the F/W. We can't do it before because we will ignore events
                 * and if those events are fatal, we won't know about it and
                 * the device will be operational although it shouldn't be
                 */
                hdev->asic_funcs->enable_events_from_fw(hdev);
        } else {
                hdev->soft_reset_cnt++;
        }

        return 0;

out_err:
        hdev->disabled = true;

        if (hard_reset) {
                dev_err(hdev->dev,
                        "Failed to reset! Device is NOT usable\n");
                hdev->hard_reset_cnt++;
        } else {
                dev_err(hdev->dev,
                        "Failed to do soft-reset, trying hard reset\n");
                hdev->soft_reset_cnt++;
                hard_reset = true;
                goto again;
        }

        atomic_set(&hdev->in_reset, 0);

        return rc;
}

/*
 * hl_device_init - main initialization function for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Allocate an id for the device, do early initialization and then call the
 * ASIC specific initialization functions. Finally, create the cdev and the
 * Linux device to expose it to the user
 */
int hl_device_init(struct hl_device *hdev, struct class *hclass)
{
        int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
        char *name;
        bool add_cdev_sysfs_on_err = false;

        name = kasprintf(GFP_KERNEL, "hl%d", hdev->id / 2);
        if (!name) {
                rc = -ENOMEM;
                goto out_disabled;
        }

        /* Initialize cdev and device structures */
        rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
                                &hdev->cdev, &hdev->dev);

        kfree(name);

        if (rc)
                goto out_disabled;

        name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->id / 2);
        if (!name) {
                rc = -ENOMEM;
                goto free_dev;
        }

        /* Initialize cdev and device structures for control device */
        rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
                                name, &hdev->cdev_ctrl, &hdev->dev_ctrl);

        kfree(name);

        if (rc)
                goto free_dev;

        /* Initialize ASIC function pointers and perform early init */
        rc = device_early_init(hdev);
        if (rc)
                goto free_dev_ctrl;

        user_interrupt_cnt = hdev->asic_prop.user_interrupt_count;

        if (user_interrupt_cnt) {
                hdev->user_interrupt = kcalloc(user_interrupt_cnt,
                                sizeof(*hdev->user_interrupt),
                                GFP_KERNEL);

                if (!hdev->user_interrupt) {
                        rc = -ENOMEM;
                        goto early_fini;
                }
        }

        /*
         * Start calling ASIC initialization. First S/W then H/W and finally
         * late init
         */
        rc = hdev->asic_funcs->sw_init(hdev);
        if (rc)
                goto user_interrupts_fini;

        /* initialize completion structure for multi CS wait */
        hl_multi_cs_completion_init(hdev);

        /*
         * Initialize the H/W queues. Must be done before hw_init, because
         * there the addresses of the kernel queue are being written to the
         * registers of the device
         */
        rc = hl_hw_queues_create(hdev);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize kernel queues\n");
                goto sw_fini;
        }

        cq_cnt = hdev->asic_prop.completion_queues_count;

        /*
         * Initialize the completion queues. Must be done before hw_init,
         * because there the addresses of the completion queues are being
         * passed as arguments to request_irq
         */
        if (cq_cnt) {
                hdev->completion_queue = kcalloc(cq_cnt,
                                sizeof(*hdev->completion_queue),
                                GFP_KERNEL);

                if (!hdev->completion_queue) {
                        dev_err(hdev->dev,
                                "failed to allocate completion queues\n");
                        rc = -ENOMEM;
                        goto hw_queues_destroy;
                }
        }

        for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
                rc = hl_cq_init(hdev, &hdev->completion_queue[i],
                                hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
                if (rc) {
                        dev_err(hdev->dev,
                                "failed to initialize completion queue\n");
                        goto cq_fini;
                }
                hdev->completion_queue[i].cq_idx = i;
        }

        /*
         * Initialize the event queue. Must be done before hw_init,
         * because there the address of the event queue is being
         * passed as argument to request_irq
         */
        rc = hl_eq_init(hdev, &hdev->event_queue);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize event queue\n");
                goto cq_fini;
        }

        /* MMU S/W must be initialized before kernel context is created */
        rc = hl_mmu_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
                goto eq_fini;
        }

        /* Allocate the kernel context */
        hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
        if (!hdev->kernel_ctx) {
                rc = -ENOMEM;
                goto mmu_fini;
        }

        hdev->compute_ctx = NULL;

        hdev->asic_funcs->state_dump_init(hdev);

        hl_debugfs_add_device(hdev);

        /* debugfs nodes are created in hl_ctx_init so it must be called after
         * hl_debugfs_add_device.
         */
        rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize kernel context\n");
                kfree(hdev->kernel_ctx);
                goto remove_device_from_debugfs;
        }

        rc = hl_cb_pool_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize CB pool\n");
                goto release_ctx;
        }

        /*
         * From this point, override rc (=0) in case of an error to allow
         * debugging (by adding char devices and creating sysfs nodes as part
         * of the error flow).
         */
        add_cdev_sysfs_on_err = true;

        /* Device is now enabled because part of the initialization requires
         * communication with the device firmware to get information that
         * is required for the initialization itself
         */
        hdev->disabled = false;

        rc = hdev->asic_funcs->hw_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "failed to initialize the H/W\n");
                rc = 0;
                goto out_disabled;
        }

        /* Check that the communication with the device is working */
        rc = hdev->asic_funcs->test_queues(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed to detect if device is alive\n");
                rc = 0;
                goto out_disabled;
        }

        rc = device_late_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed late initialization\n");
                rc = 0;
                goto out_disabled;
        }

        dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
                hdev->asic_name,
                hdev->asic_prop.dram_size / SZ_1G);

        rc = hl_vm_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed to initialize memory module\n");
                rc = 0;
                goto out_disabled;
        }

        /*
         * Expose devices and sysfs nodes to user.
         * From here there is no need to add char devices and create sysfs nodes
         * in case of an error.
         */
        add_cdev_sysfs_on_err = false;
        rc = device_cdev_sysfs_add(hdev);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to add char devices and sysfs nodes\n");
                rc = 0;
                goto out_disabled;
        }

        /* Need to call this again because the max power might change,
         * depending on card type for certain ASICs
         */
        hl_set_max_power(hdev);

        /*
         * hl_hwmon_init() must be called after device_late_init(), because only
         * there we get the information from the device about which
         * hwmon-related sensors the device supports.
         * Furthermore, it must be done after adding the device to the system.
         */
        rc = hl_hwmon_init(hdev);
        if (rc) {
                dev_err(hdev->dev, "Failed to initialize hwmon\n");
                rc = 0;
                goto out_disabled;
        }

        dev_notice(hdev->dev,
                "Successfully added device to habanalabs driver\n");

        hdev->init_done = true;

        /* After initialization is done, we are ready to receive events from
         * the F/W. We can't do it before because we will ignore events and if
         * those events are fatal, we won't know about it and the device will
         * be operational although it shouldn't be
         */
        hdev->asic_funcs->enable_events_from_fw(hdev);

        return 0;

release_ctx:
        if (hl_ctx_put(hdev->kernel_ctx) != 1)
                dev_err(hdev->dev,
                        "kernel ctx is still alive on initialization failure\n");
remove_device_from_debugfs:
        hl_debugfs_remove_device(hdev);
mmu_fini:
        hl_mmu_fini(hdev);
eq_fini:
        hl_eq_fini(hdev, &hdev->event_queue);
cq_fini:
        for (i = 0 ; i < cq_ready_cnt ; i++)
                hl_cq_fini(hdev, &hdev->completion_queue[i]);
        kfree(hdev->completion_queue);
hw_queues_destroy:
        hl_hw_queues_destroy(hdev);
sw_fini:
        hdev->asic_funcs->sw_fini(hdev);
user_interrupts_fini:
        kfree(hdev->user_interrupt);
early_fini:
        device_early_fini(hdev);
free_dev_ctrl:
        put_device(hdev->dev_ctrl);
free_dev:
        put_device(hdev->dev);
out_disabled:
        hdev->disabled = true;
        if (add_cdev_sysfs_on_err)
                device_cdev_sysfs_add(hdev);
        if (hdev->pdev)
                dev_err(&hdev->pdev->dev,
                        "Failed to initialize hl%d. Device is NOT usable !\n",
                        hdev->id / 2);
        else
                pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
                        hdev->id / 2);

        return rc;
}

/*
 * hl_device_fini - main tear-down function for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Destroy the device, call ASIC fini functions and release the id
 */
void hl_device_fini(struct hl_device *hdev)
{
        ktime_t timeout;
        u64 reset_sec;
        int i, rc;

        dev_info(hdev->dev, "Removing device\n");

        hdev->device_fini_pending = 1;
        flush_delayed_work(&hdev->device_reset_work.reset_work);

        if (hdev->pldm)
                reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;
        else
                reset_sec = HL_HARD_RESET_MAX_TIMEOUT;

        /*
         * This function is competing with the reset function, so try to
         * take the reset atomic and if we are already in the middle of a
         * reset, wait until the reset function is finished. The reset
         * function is designed to always finish. However, in Gaudi, because
         * of all the network ports, the hard reset could take between
         * 10-30 seconds
         */

        timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
        rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
        while (rc) {
                usleep_range(50, 200);
                rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
                if (ktime_compare(ktime_get(), timeout) > 0) {
                        dev_crit(hdev->dev,
                                "Failed to remove device because reset function did not finish\n");
                        return;
                }
        }

        /* Disable PCI access from the device F/W so it won't send us
         * additional interrupts. We disable MSI/MSI-X at the halt_engines
         * function and we can't have the F/W sending us interrupts after
         * that. We need to disable the access here because if the device is
         * marked disabled, the message won't be sent. Also, in case of
         * heartbeat, the device CPU is marked as disabled so this message
         * won't be sent
         */
        hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);

        /* Mark device as disabled */
        hdev->disabled = true;

        take_release_locks(hdev);

        hdev->hard_reset_pending = true;

        hl_hwmon_fini(hdev);

        cleanup_resources(hdev, true, false);

        /* Kill processes here after CS rollback. This is because the process
         * can't really exit until all its CSs are done, which is what we
         * do in cs rollback
         */
        dev_info(hdev->dev,
                "Waiting for all processes to exit (timeout of %u seconds)",
                HL_PENDING_RESET_LONG_SEC);

        rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC);
        if (rc) {
                dev_crit(hdev->dev, "Failed to kill all open processes\n");
                device_disable_open_processes(hdev);
        }

        hl_cb_pool_fini(hdev);

        /* Reset the H/W. It will be in idle state after this returns */
        hdev->asic_funcs->hw_fini(hdev, true, false);

        hdev->fw_loader.linux_loaded = false;

        /* Release kernel context */
        if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
                dev_err(hdev->dev, "kernel ctx is still alive\n");

        hl_debugfs_remove_device(hdev);

        hl_vm_fini(hdev);

        hl_mmu_fini(hdev);

        hl_eq_fini(hdev, &hdev->event_queue);

        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                hl_cq_fini(hdev, &hdev->completion_queue[i]);
        kfree(hdev->completion_queue);
        kfree(hdev->user_interrupt);

        hl_hw_queues_destroy(hdev);

        /* Call ASIC S/W finalize function */
        hdev->asic_funcs->sw_fini(hdev);

        device_early_fini(hdev);

        /* Hide devices and sysfs nodes from user */
        device_cdev_sysfs_del(hdev);

        pr_info("removed device successfully\n");
}

/*
 * MMIO register access helper functions.
 */

/*
 * hl_rreg - Read an MMIO register
 *
 * @hdev: pointer to habanalabs device structure
 * @reg: MMIO register offset (in bytes)
 *
 * Returns the value of the MMIO register we are asked to read
 *
 */
inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
{
        return readl(hdev->rmmio + reg);
}

/*
 * hl_wreg - Write to an MMIO register
 *
 * @hdev: pointer to habanalabs device structure
 * @reg: MMIO register offset (in bytes)
 * @val: 32-bit value
 *
 * Writes the 32-bit value into the MMIO register
 *
 */
inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
{
        writel(val, hdev->rmmio + reg);
}