linux/drivers/misc/habanalabs/common/command_buffer.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

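/*
 * cb_map_mem() - map a CB's DMA buffer into the device MMU of a user context.
 *
 * Carves page-size chunks out of the context's CB VA pool, maps each chunk to
 * the CB's bus address and keeps the VA blocks on cb->va_block_list so they
 * can be unmapped later. On failure, everything allocated so far is rolled
 * back.
 */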
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_va_block *va_block, *tmp;
	dma_addr_t bus_addr;
	u64 virt_addr;
	u32 page_size = prop->pmmu.page_size;
	s32 offset;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because no VA range is allocated for CB mapping\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&cb->va_block_list);

	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += page_size) {

		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			dev_err(hdev->dev,
				"Failed to allocate device virtual address for CB\n");
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size - 1;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	mutex_lock(&ctx->mmu_lock);

	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
				va_block->size, list_is_last(&va_block->node,
							&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);

	mutex_unlock(&ctx->mmu_lock);

	cb->is_mmu_mapped = true;

	return rc;

err_va_umap:
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

	rc = hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}

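/*
 * cb_unmap_mem() - undo cb_map_mem(): unmap every VA block of the CB from the
 * device MMU, invalidate the MMU cache and return the blocks to the context's
 * CB VA pool.
 */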
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;

	mutex_lock(&ctx->mmu_lock);

	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap CB's va 0x%llx\n",
					va_block->start);

	hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}

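/*
 * cb_fini() - release the CB's backing memory: internal CBs go back to the
 * device's internal pool, other CBs free their DMA coherent allocation. The
 * hl_cb descriptor itself is freed as well.
 */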
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				(uintptr_t)cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				cb->kernel_address, cb->bus_address);

	kfree(cb);
}

static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(hdev, cb);
}

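/*
 * hl_cb_alloc() - allocate a new hl_cb descriptor together with its backing
 * memory: from the internal CB pool for internal CBs, otherwise as a DMA
 * coherent buffer. Returns NULL on any allocation failure.
 */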
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb = NULL;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

	if (!cb)
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
		if (!p)
			p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
					cb_size, &cb->bus_address, GFP_KERNEL);
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of DMA memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}

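/*
 * hl_cb_create() - create a command buffer for a context and register it in
 * the manager's IDR. Kernel-context CBs may be taken from the pre-allocated
 * CB pool, and user CBs can optionally be mapped to the device MMU. On
 * success, *handle holds the mmap-able handle that is returned to the user.
 */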
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc, ctx_id = ctx->asid;

	/*
	 * Can't use the generic function to check this because of a special
	 * case where we create a CB as part of the reset process
	 */
	if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must be less than %d\n",
			cb_size, SZ_2M);
		rc = -EINVAL;
		goto out_err;
	}

	if (!internal_cb) {
		/* Minimum allocation must be PAGE SIZE */
		if (cb_size < PAGE_SIZE)
			cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_size <= hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&hdev->cb_pool_lock);
			if (!list_empty(&hdev->cb_pool)) {
				cb = list_first_entry(&hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&hdev->cb_pool_lock);
				dev_dbg(hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx = ctx;
	hl_ctx_get(hdev, cb->ctx);

	if (map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(ctx, cb);
		if (rc)
			goto release_cb;
	}

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto unmap_mem;
	}

	cb->id = (u64) rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * The IDR value fits in 32 bits, so it can safely be OR'd with the CB
	 * mmap type mask, which occupies bits above bit 31. The result is
	 * shifted by PAGE_SHIFT so the handle can later be passed back to the
	 * driver as an mmap offset.
	 */
	*handle = cb->id | HL_MMAP_TYPE_CB;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

unmap_mem:
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);
release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}

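/*
 * hl_cb_destroy() - remove the CB that matches the given handle from the
 * manager's IDR and drop the reference taken at creation time. The CB memory
 * itself is released once the last reference goes away.
 */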
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was given to the user for mmap, so shift it back to the
	 * value that the IDR module originally allocated
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}

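/*
 * hl_cb_info() - query a CB: either return the device VA of a CB that was
 * mapped to the device MMU, or return the number of command submissions that
 * currently use it.
 */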
static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u64 cb_handle, u32 flags, u32 *usage_cnt, u64 *device_va)
{
	struct hl_vm_va_block *va_block;
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/* The CB handle was given to the user for mmap, so it needs to be
	 * shifted back to the value that was allocated by the IDR module.
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB info failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
		goto out;
	}

	if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
		if (cb->is_mmu_mapped) {
			/* A mapped CB always has at least one VA block */
			va_block = list_first_entry(&cb->va_block_list,
					struct hl_vm_va_block, node);
			*device_va = va_block->start;
		} else {
			dev_err(hdev->dev, "CB is not mapped to the device's MMU\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		*usage_cnt = atomic_read(&cb->cs_cnt);
	}

out:
	spin_unlock(&mgr->cb_lock);
	return rc;
}

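/*
 * hl_cb_ioctl() - dispatcher for the CB ioctl: create, destroy or query a
 * command buffer on behalf of the calling user context.
 */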
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	u64 handle = 0, device_va = 0;
	enum hl_device_status status;
	u32 usage_cnt = 0;
	int rc;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must be less than %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;

	case HL_CB_OP_INFO:
		rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
				args->in.flags,
				&usage_cnt,
				&device_va);
		if (rc)
			break;

		memset(&args->out, 0, sizeof(args->out));

		if (args->in.flags & HL_CB_FLAGS_GET_DEVICE_VA)
			args->out.device_va = device_va;
		else
			args->out.usage_cnt = usage_cnt;
		break;

	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

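/*
 * cb_vm_close() - VMA close callback. Tracks partial unmaps; once the entire
 * mapping is gone, clears the mmap flag and drops the reference that
 * hl_cb_mmap() transferred to the VMA.
 */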
static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};

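/*
 * hl_cb_mmap() - map a CB into user space. The CB handle arrives encoded in
 * the VMA's page offset; the mapping size must match the CB size, and a CB
 * can only be mmap'ed once at a time.
 */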
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	u32 handle, user_cb_size;
	int rc;

	/* We use the page offset to hold the IDR handle, so we need to clear
	 * it before doing the mmap itself
	 */
	handle = vma->vm_pgoff;
	vma->vm_pgoff = 0;

	/* The reference taken here is transferred to the VMA on success,
	 * or dropped in the error paths below
	 */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle 0x%x\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_cb_size = vma->vm_end - vma->vm_start;
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
							user_cb_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n",
			vma->vm_start);

		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB already mmaped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = cb;

	rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address,
					cb->bus_address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;
	vma->vm_pgoff = handle;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}

struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);

	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle 0x%x\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}

void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}

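/*
 * hl_cb_mgr_fini() - drop the manager's reference on every CB left in the
 * IDR, warn about CBs that are still in use, and destroy the IDR itself.
 */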
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx->asid);
	}

	idr_destroy(&mgr->cb_handles);
}

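/*
 * hl_cb_kernel_create() - convenience wrapper for driver-internal use: create
 * a CB on the kernel context and return a referenced pointer to it instead of
 * a user handle.
 */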
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here */
	if (!cb) {
		dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
				(u32) cb_handle);
		goto destroy_cb;
	}

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}

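/*
 * hl_cb_pool_init() - pre-allocate the device-wide pool of kernel-context CBs
 * that hl_cb_create() hands out to avoid allocating on the command submission
 * path.
 */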
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}

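/*
 * hl_cb_va_pool_init() - create the per-context gen_pool of device virtual
 * addresses that cb_map_mem() draws from, covering the ASIC's reserved CB VA
 * range. A no-op on ASICs without CB mapping support.
 */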
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
}