linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"

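/*
 * amdgpu_ctx_init - initialize a GPU context
 *
 * Allocates one fence slot per scheduler job for every possible ring and
 * creates a scheduler entity on each hardware ring at normal priority.
 * The current GPU reset counter is sampled so that later queries can tell
 * whether a reset happened during the lifetime of this context.
 */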
static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct amd_sched_rq *rq;

		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			break;
	}

	if (i < adev->num_rings) {
		for (j = 0; j < i; j++)
			amd_sched_entity_fini(&adev->rings[j]->sched,
					      &ctx->rings[j].entity);
		kfree(ctx->fences);
		ctx->fences = NULL;
		return r;
	}
	return 0;
}

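/*
 * amdgpu_ctx_fini - tear down a GPU context
 *
 * Drops the references still held on the per-ring fences, frees the fence
 * array and destroys the scheduler entities created in amdgpu_ctx_init().
 */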
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	for (i = 0; i < adev->num_rings; i++)
		amd_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);
}

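/*
 * amdgpu_ctx_alloc - allocate a context and publish it in the per-file IDR
 *
 * Handles returned through @id start at 1, so 0 is never a valid context
 * id. The manager lock protects the IDR against concurrent allocation and
 * removal on the same file descriptor.
 */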
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

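/*
 * amdgpu_ctx_do_release - kref release callback
 *
 * Called when the last reference to a context is dropped; finalizes the
 * context and frees its memory.
 */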
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}

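/*
 * amdgpu_ctx_free - destroy a context by handle
 *
 * Removes the handle from the IDR and drops the reference taken at
 * allocation time. Returns -EINVAL if the handle does not exist.
 */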
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		idr_remove(&mgr->ctx_handles, id);
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}

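/*
 * amdgpu_ctx_query - report context state to userspace
 *
 * Compares the reset counter sampled when the context was created (or last
 * queried) with the current one; any difference is reported as
 * AMDGPU_CTX_UNKNOWN_RESET, since the kernel does not yet track which
 * context was guilty of the hang.
 */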
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

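/*
 * amdgpu_ctx_ioctl - entry point for the context ioctl
 *
 * Dispatches the alloc, free and query-state operations defined in the
 * drm_amdgpu_ctx UAPI union.
 */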
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

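/*
 * amdgpu_ctx_get - look up a context and take a reference
 *
 * Returns NULL if the handle is unknown; otherwise the caller must balance
 * the lookup with amdgpu_ctx_put().
 */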
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

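/*
 * amdgpu_ctx_put - drop a reference taken with amdgpu_ctx_get()
 */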
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

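/*
 * amdgpu_ctx_add_fence - remember the fence of a new submission
 *
 * The per-ring fence array is used as a ring buffer indexed by
 * sequence & (amdgpu_sched_jobs - 1). If the slot about to be reused still
 * holds a fence, wait for it first, so at most amdgpu_sched_jobs
 * submissions are outstanding per ring. Returns the sequence number
 * assigned to @fence.
 */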
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other) {
		signed long r;
		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;
}

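/*
 * amdgpu_ctx_get_fence - look up the fence of a previous submission
 *
 * Returns ERR_PTR(-EINVAL) for sequence numbers that have not been emitted
 * yet, NULL when the fence is so old that its slot has already been
 * recycled (meaning it signaled long ago), and otherwise a new reference
 * to the stored fence.
 */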
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

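/*
 * amdgpu_ctx_mgr_init - initialize the per-file context manager
 */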
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

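/*
 * amdgpu_ctx_mgr_fini - release all contexts a file left behind
 *
 * Walks the IDR on file close and drops the allocation reference of every
 * remaining context; an error is logged if something else still holds a
 * reference.
 */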
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}