/* linux/drivers/misc/habanalabs/common/context.c */
   1// SPDX-License-Identifier: GPL-2.0
   2
   3/*
   4 * Copyright 2016-2019 HabanaLabs, Ltd.
   5 * All Rights Reserved.
   6 */
   7
   8#include "habanalabs.h"
   9
  10#include <linux/slab.h>
  11
/*
 * hl_ctx_fini() - release all resources held by a context.
 * @ctx: pointer to the context being destroyed.
 *
 * Called only from the kref release path, i.e. when no command submission
 * (CS) still holds a reference to the context, so teardown can proceed
 * without further synchronization.
 */
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	u64 idle_mask = 0;
	int i;

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues so we can safely remove it.
	 * This is because for each CS, we increment the ref count and for
	 * every CS that was finished we decrement it and we won't arrive
	 * to this function unless the ref count is 0
	 */

	/* Drop the reference held on every tracked fence. Unused slots are
	 * presumably NULL and hl_fence_put() NULL-safe - TODO confirm.
	 */
	for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
		hl_fence_put(ctx->cs_pending[i]);

	kfree(ctx->cs_pending);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);

		/* The engines are stopped as there is no executing CS, but the
		 * Coresight might be still working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 * Stop only if this is the compute context, as there can be
		 * only one compute context
		 */
		if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
			hl_device_set_debug_mode(hdev, false);

		/* Tear down in reverse order of hl_ctx_init():
		 * ASIC hook, CB VA pool, VM, then release the ASID.
		 */
		hdev->asic_funcs->ctx_fini(ctx);
		hl_cb_va_pool_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);

		/* Scrub both SRAM and DRAM */
		hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);

		/* Warn if the HW is still busy after the last user context is
		 * gone; skipped on simulation platforms (pldm) and when no
		 * PCI device is present.
		 */
		if ((!hdev->pldm) && (hdev->pdev) &&
				(!hdev->asic_funcs->is_device_idle(hdev,
							&idle_mask, NULL)))
			dev_notice(hdev->dev,
				"device not idle after user context is closed (0x%llx)\n",
				idle_mask);
	} else {
		dev_dbg(hdev->dev, "closing kernel context\n");
		/* The kernel context only set up the MMU in hl_ctx_init() */
		hl_mmu_ctx_fini(ctx);
	}
}
  62
  63void hl_ctx_do_release(struct kref *ref)
  64{
  65        struct hl_ctx *ctx;
  66
  67        ctx = container_of(ref, struct hl_ctx, refcount);
  68
  69        hl_ctx_fini(ctx);
  70
  71        if (ctx->hpriv)
  72                hl_hpriv_put(ctx->hpriv);
  73
  74        kfree(ctx);
  75}
  76
  77int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
  78{
  79        struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
  80        struct hl_ctx *ctx;
  81        int rc;
  82
  83        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  84        if (!ctx) {
  85                rc = -ENOMEM;
  86                goto out_err;
  87        }
  88
  89        mutex_lock(&mgr->ctx_lock);
  90        rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
  91        mutex_unlock(&mgr->ctx_lock);
  92
  93        if (rc < 0) {
  94                dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
  95                goto free_ctx;
  96        }
  97
  98        ctx->handle = rc;
  99
 100        rc = hl_ctx_init(hdev, ctx, false);
 101        if (rc)
 102                goto remove_from_idr;
 103
 104        hl_hpriv_get(hpriv);
 105        ctx->hpriv = hpriv;
 106
 107        /* TODO: remove for multiple contexts per process */
 108        hpriv->ctx = ctx;
 109
 110        /* TODO: remove the following line for multiple process support */
 111        hdev->compute_ctx = ctx;
 112
 113        return 0;
 114
 115remove_from_idr:
 116        mutex_lock(&mgr->ctx_lock);
 117        idr_remove(&mgr->ctx_handles, ctx->handle);
 118        mutex_unlock(&mgr->ctx_lock);
 119free_ctx:
 120        kfree(ctx);
 121out_err:
 122        return rc;
 123}
 124
 125void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
 126{
 127        if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
 128                return;
 129
 130        dev_warn(hdev->dev,
 131                "user process released device but its command submissions are still executing\n");
 132}
 133
/*
 * hl_ctx_init() - initialize a newly allocated context object.
 * @hdev: habanalabs device structure.
 * @ctx: zero-initialized context to set up.
 * @is_kernel_ctx: true for the driver's internal kernel context,
 *                 false for a user (compute) context.
 *
 * Sets up refcounting and CS bookkeeping, then performs the type-specific
 * initialization: MMU only for the kernel context; ASID, VM, CB VA pool
 * and the ASIC hook for a user context. On failure, everything acquired
 * so far is unwound in reverse order via the goto chain below.
 *
 * Return: 0 on success, negative errno on failure.
 */
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	int rc = 0;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	/* CS sequence numbering starts at 1 */
	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;
	/* Fence pointer slot per possible in-flight CS; indexed by
	 * sequence modulo max_pending_cs (see hl_ctx_get_fence())
	 */
	ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
				sizeof(struct hl_fence *),
				GFP_KERNEL);
	if (!ctx->cs_pending)
		return -ENOMEM;

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_mmu_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mmu ctx module\n");
			goto err_free_cs_pending;
		}
	} else {
		/* hl_asid_alloc() returns 0 on exhaustion (0 is reserved for
		 * the kernel context)
		 */
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			rc = -ENOMEM;
			goto err_free_cs_pending;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			/* NOTE(review): this overwrites the error code
			 * returned by hl_vm_ctx_init() with -ENOMEM -
			 * consider propagating rc unchanged
			 */
			rc = -ENOMEM;
			goto err_asid_free;
		}

		rc = hl_cb_va_pool_init(ctx);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to init VA pool for mapped CB\n");
			goto err_vm_ctx_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_cb_va_pool_fini;
		}

		dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
	}

	return 0;

err_cb_va_pool_fini:
	hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
	hl_vm_ctx_fini(ctx);
err_asid_free:
	hl_asid_free(hdev, ctx->asid);
err_free_cs_pending:
	kfree(ctx->cs_pending);

	return rc;
}
 203
/*
 * hl_ctx_get() - take an additional reference on a context.
 * @hdev: habanalabs device structure (unused; kept for API symmetry).
 * @ctx: context to reference.
 */
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}
 208
/*
 * hl_ctx_put() - drop a reference on a context.
 * @ctx: context to dereference.
 *
 * Return: 1 if this was the last reference and the context was released
 *         via hl_ctx_do_release(), 0 otherwise (kref_put() semantics).
 */
int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}
 213
 214struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
 215{
 216        struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
 217        struct hl_fence *fence;
 218
 219        spin_lock(&ctx->cs_lock);
 220
 221        if (seq >= ctx->cs_sequence) {
 222                spin_unlock(&ctx->cs_lock);
 223                return ERR_PTR(-EINVAL);
 224        }
 225
 226        if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) {
 227                spin_unlock(&ctx->cs_lock);
 228                return NULL;
 229        }
 230
 231        fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
 232        hl_fence_get(fence);
 233
 234        spin_unlock(&ctx->cs_lock);
 235
 236        return fence;
 237}
 238
/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
{
	/* ctx_lock serializes handle allocation/removal in ctx_handles */
	mutex_init(&mgr->ctx_lock);
	idr_init(&mgr->ctx_handles);
}
 252
 253/*
 254 * hl_ctx_mgr_fini - finalize the context manager
 255 *
 256 * @hdev: pointer to device structure
 257 * @mgr: pointer to context manager structure
 258 *
 259 * This function goes over all the contexts in the manager and frees them.
 260 * It is called when a process closes the FD.
 261 */
 262void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
 263{
 264        struct hl_ctx *ctx;
 265        struct idr *idp;
 266        u32 id;
 267
 268        idp = &mgr->ctx_handles;
 269
 270        idr_for_each_entry(idp, ctx, id)
 271                hl_ctx_free(hdev, ctx);
 272
 273        idr_destroy(&mgr->ctx_handles);
 274        mutex_destroy(&mgr->ctx_lock);
 275}
 276