linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e) \
        container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
        [AMDGPU_HW_IP_GFX]      =       1,
        [AMDGPU_HW_IP_COMPUTE]  =       4,
        [AMDGPU_HW_IP_DMA]      =       2,
        [AMDGPU_HW_IP_UVD]      =       1,
        [AMDGPU_HW_IP_VCE]      =       1,
        [AMDGPU_HW_IP_UVD_ENC]  =       1,
        [AMDGPU_HW_IP_VCN_DEC]  =       1,
        [AMDGPU_HW_IP_VCN_ENC]  =       1,
        [AMDGPU_HW_IP_VCN_JPEG] =       1,
};

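/* Check whether the caller may create a context at the requested priority.
 * Priorities above NORMAL require CAP_SYS_NICE or DRM master status.
 */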
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
                                      enum drm_sched_priority priority)
{
        if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
                return -EINVAL;

        /* NORMAL and below are accessible by everyone */
        if (priority <= DRM_SCHED_PRIORITY_NORMAL)
                return 0;

        if (capable(CAP_SYS_NICE))
                return 0;

        if (drm_is_current_master(filp))
                return 0;

        return -EACCES;
}

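/* Map a scheduler software priority onto a compute pipe priority. */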
static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
{
        switch (prio) {
        case DRM_SCHED_PRIORITY_HIGH_HW:
        case DRM_SCHED_PRIORITY_KERNEL:
                return AMDGPU_GFX_PIPE_PRIO_HIGH;
        default:
                return AMDGPU_GFX_PIPE_PRIO_NORMAL;
        }
}

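/* Convert a scheduler priority to a hardware ring priority for the given
 * IP block, falling back to the default priority when no scheduler is
 * registered at the requested level.
 */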
static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
                                                 enum drm_sched_priority prio,
                                                 u32 hw_ip)
{
        unsigned int hw_prio;

        hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
                        amdgpu_ctx_sched_prio_to_compute_prio(prio) :
                        AMDGPU_RING_PRIO_DEFAULT;
        hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
        if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
                hw_prio = AMDGPU_RING_PRIO_DEFAULT;

        return hw_prio;
}

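/* Lazily allocate and initialize the scheduler entity for one ring of an
 * IP block. The fence ring buffer is sized by the amdgpu_sched_jobs module
 * parameter; for VCN the entity is bound to the least loaded scheduler so
 * it stays on a single VCN instance.
 */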
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
                                   const u32 ring)
{
        struct amdgpu_device *adev = ctx->adev;
        struct amdgpu_ctx_entity *entity;
        struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
        unsigned num_scheds = 0;
        unsigned int hw_prio;
        enum drm_sched_priority priority;
        int r;

        entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
                         GFP_KERNEL);
        if (!entity)
                return -ENOMEM;

        entity->sequence = 1;
        priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
                                ctx->init_priority : ctx->override_priority;
        hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);

        hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
        scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
        num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

        if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
                sched = drm_sched_pick_best(scheds, num_scheds);
                scheds = &sched;
                num_scheds = 1;
        }

        r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
                                  &ctx->guilty);
        if (r)
                goto error_free_entity;

        ctx->entities[hw_ip][ring] = entity;
        return 0;

error_free_entity:
        kfree(entity);

        return r;
}

static int amdgpu_ctx_init(struct amdgpu_device *adev,
                           enum drm_sched_priority priority,
                           struct drm_file *filp,
                           struct amdgpu_ctx *ctx)
{
        int r;

        r = amdgpu_ctx_priority_permit(filp, priority);
        if (r)
                return r;

        memset(ctx, 0, sizeof(*ctx));

        ctx->adev = adev;

        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
        mutex_init(&ctx->lock);

        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
        ctx->reset_counter_query = ctx->reset_counter;
        ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        ctx->init_priority = priority;
        ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

        return 0;
}

static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
        int i;

        if (!entity)
                return;

        for (i = 0; i < amdgpu_sched_jobs; ++i)
                dma_fence_put(entity->fences[i]);

        kfree(entity);
}

static void amdgpu_ctx_fini(struct kref *ref)
{
        struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
        struct amdgpu_device *adev = ctx->adev;
        unsigned i, j;

        if (!adev)
                return;

        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
                        amdgpu_ctx_fini_entity(ctx->entities[i][j]);
                        ctx->entities[i][j] = NULL;
                }
        }

        mutex_destroy(&ctx->lock);
        kfree(ctx);
}

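/* Look up (and create on first use) the scheduler entity for the given
 * IP block, instance and ring of a context.
 */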
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
                          u32 ring, struct drm_sched_entity **entity)
{
        int r;

        if (hw_ip >= AMDGPU_HW_IP_NUM) {
                DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
                return -EINVAL;
        }

        /* Right now all IPs have only one instance - multiple rings. */
        if (instance != 0) {
                DRM_DEBUG("invalid ip instance: %d\n", instance);
                return -EINVAL;
        }

        if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
                DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
                return -EINVAL;
        }

        if (ctx->entities[hw_ip][ring] == NULL) {
                r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
                if (r)
                        return r;
        }

        *entity = &ctx->entities[hw_ip][ring]->entity;
        return 0;
}

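/* Allocate a new context, register it in the file-private IDR and return
 * the handle through @id.
 */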
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
                            struct drm_file *filp,
                            enum drm_sched_priority priority,
                            uint32_t *id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;
        int r;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        mutex_lock(&mgr->lock);
        r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
        if (r < 0) {
                mutex_unlock(&mgr->lock);
                kfree(ctx);
                return r;
        }

        *id = (uint32_t)r;
        r = amdgpu_ctx_init(adev, priority, filp, ctx);
        if (r) {
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
                kfree(ctx);
        }
        mutex_unlock(&mgr->lock);
        return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
        struct amdgpu_ctx *ctx;
        u32 i, j;

        ctx = container_of(ref, struct amdgpu_ctx, refcount);
        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
                        if (!ctx->entities[i][j])
                                continue;

                        drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
                }
        }

        amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;

        mutex_lock(&mgr->lock);
        ctx = idr_remove(&mgr->ctx_handles, id);
        if (ctx)
                kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        mutex_unlock(&mgr->lock);
        return ctx ? 0 : -EINVAL;
}

static int amdgpu_ctx_query(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv, uint32_t id,
                            union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned reset_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        /* TODO: these two are always zero */
        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        /* determine if a GPU reset has occurred since the last call */
        reset_counter = atomic_read(&adev->gpu_reset_counter);
        /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
        if (ctx->reset_counter_query == reset_counter)
                out->state.reset_status = AMDGPU_CTX_NO_RESET;
        else
                out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
        ctx->reset_counter_query = reset_counter;

        mutex_unlock(&mgr->lock);
        return 0;
}

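/* Extended state query: reports reset, VRAM-lost, guilty and RAS error
 * status as flags instead of a single reset counter.
 */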
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        struct amdgpu_fpriv *fpriv, uint32_t id,
        union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned long ras_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

        if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

        if (atomic_read(&ctx->guilty))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

        /* query ue count */
        ras_counter = amdgpu_ras_query_error_count(adev, false);
        /* the ras counter is monotonically increasing */
        if (ras_counter != ctx->ras_counter_ue) {
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
                ctx->ras_counter_ue = ras_counter;
        }

        /* query ce count */
        ras_counter = amdgpu_ras_query_error_count(adev, true);
        if (ras_counter != ctx->ras_counter_ce) {
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
                ctx->ras_counter_ce = ras_counter;
        }

        mutex_unlock(&mgr->lock);
        return 0;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp)
{
        int r;
        uint32_t id;
        enum drm_sched_priority priority;

        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        r = 0;
        id = args->in.ctx_id;
        priority = amdgpu_to_sched_priority(args->in.priority);

        /* For backwards compatibility reasons, we need to accept
         * ioctls with garbage in the priority field.
         */
        if (priority == DRM_SCHED_PRIORITY_INVALID)
                priority = DRM_SCHED_PRIORITY_NORMAL;

        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
                r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
                r = amdgpu_ctx_free(fpriv, id);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE:
                r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE2:
                r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
                break;
        default:
                return -EINVAL;
        }

        return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (!fpriv)
                return NULL;

        mgr = &fpriv->ctx_mgr;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx)
                kref_get(&ctx->refcount);
        mutex_unlock(&mgr->lock);
        return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
        if (ctx == NULL)
                return -EINVAL;

        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        return 0;
}

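/* Remember the fence of a submitted job in the per-entity ring buffer and
 * hand back the sequence number as the user-visible handle. The slot being
 * overwritten must already be signaled, since the buffer only holds
 * amdgpu_sched_jobs entries.
 */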
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
                          struct drm_sched_entity *entity,
                          struct dma_fence *fence, uint64_t *handle)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        uint64_t seq = centity->sequence;
        struct dma_fence *other = NULL;
        unsigned idx = 0;

        idx = seq & (amdgpu_sched_jobs - 1);
        other = centity->fences[idx];
        if (other)
                BUG_ON(!dma_fence_is_signaled(other));

        dma_fence_get(fence);

        spin_lock(&ctx->ring_lock);
        centity->fences[idx] = fence;
        centity->sequence++;
        spin_unlock(&ctx->ring_lock);

        dma_fence_put(other);
        if (handle)
                *handle = seq;
}

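/* Look up the fence for a sequence number. ~0ull means the most recent
 * submission; sequence numbers that already fell out of the ring buffer
 * return NULL, ones not submitted yet return -EINVAL.
 */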
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                       struct drm_sched_entity *entity,
                                       uint64_t seq)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        struct dma_fence *fence;

        spin_lock(&ctx->ring_lock);

        if (seq == ~0ull)
                seq = centity->sequence - 1;

        if (seq >= centity->sequence) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }

        if (seq + amdgpu_sched_jobs < centity->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }

        fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
        spin_unlock(&ctx->ring_lock);

        return fence;
}

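/* Apply a new software priority to an entity and, for compute, move the
 * entity to the scheduler set matching the corresponding hardware queue
 * priority.
 */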
static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
                                            struct amdgpu_ctx_entity *aentity,
                                            int hw_ip,
                                            enum drm_sched_priority priority)
{
        struct amdgpu_device *adev = ctx->adev;
        unsigned int hw_prio;
        struct drm_gpu_scheduler **scheds = NULL;
        unsigned num_scheds;

        /* set sw priority */
        drm_sched_entity_set_priority(&aentity->entity, priority);

        /* set hw priority */
        if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
                hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
                                                      AMDGPU_HW_IP_COMPUTE);
                hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
                scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
                num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
                drm_sched_entity_modify_sched(&aentity->entity, scheds,
                                              num_scheds);
        }
}

void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
                                  enum drm_sched_priority priority)
{
        enum drm_sched_priority ctx_prio;
        unsigned i, j;

        ctx->override_priority = priority;

        ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
                        ctx->init_priority : ctx->override_priority;
        for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
                        if (!ctx->entities[i][j])
                                continue;

                        amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
                                                       i, ctx_prio);
                }
        }
}

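/* Wait for the fence in the slot that the next submission would overwrite,
 * effectively throttling userspace to the depth of the fence ring buffer.
 */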
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
                               struct drm_sched_entity *entity)
{
        struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
        struct dma_fence *other;
        unsigned idx;
        long r;

        spin_lock(&ctx->ring_lock);
        idx = centity->sequence & (amdgpu_sched_jobs - 1);
        other = dma_fence_get(centity->fences[idx]);
        spin_unlock(&ctx->ring_lock);

        if (!other)
                return 0;

        r = dma_fence_wait(other, true);
        if (r < 0 && r != -ERESTARTSYS)
                DRM_ERROR("Error (%ld) waiting for fence!\n", r);

        dma_fence_put(other);
        return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
}

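/* Flush all entities of all contexts managed by @mgr, waiting for pending
 * jobs within the given timeout. Returns the remaining timeout.
 */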
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i, j;

        idp = &mgr->ctx_handles;

        mutex_lock(&mgr->lock);
        idr_for_each_entry(idp, ctx, id) {
                for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                        for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
                                struct drm_sched_entity *entity;

                                if (!ctx->entities[i][j])
                                        continue;

                                entity = &ctx->entities[i][j]->entity;
                                timeout = drm_sched_entity_flush(entity, timeout);
                        }
                }
        }
        mutex_unlock(&mgr->lock);
        return timeout;
}

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i, j;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_read(&ctx->refcount) != 1) {
                        DRM_ERROR("ctx %p is still alive\n", ctx);
                        continue;
                }

                for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
                        for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
                                struct drm_sched_entity *entity;

                                if (!ctx->entities[i][j])
                                        continue;

                                entity = &ctx->entities[i][j]->entity;
                                drm_sched_entity_fini(entity);
                        }
                }
        }
}

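/* Tear down the context manager on file close: finish all entities, drop
 * the final context references and destroy the IDR.
 */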
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id;

        amdgpu_ctx_mgr_entity_fini(mgr);

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
                        DRM_ERROR("ctx %p is still alive\n", ctx);
        }

        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
}