linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"

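/**
 * amdgpu_ctx_init - initialize a caller-provided GPU context
 * @adev: amdgpu device the context belongs to
 * @kernel: use the kernel run queue instead of the normal one
 * @ctx: context to initialize
 *
 * Zeroes @ctx, initializes its refcount, ring lock and per-ring sequence
 * numbers and, when the GPU scheduler is enabled, creates one scheduler
 * entity per ring.  On entity init failure the entities created so far are
 * torn down again and the error is returned; @ctx itself is owned by the
 * caller and is not freed here.
 */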
int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
                    struct amdgpu_ctx *ctx)
{
        unsigned i, j;
        int r;

        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;
        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                ctx->rings[i].sequence = 1;

        if (amdgpu_enable_scheduler) {
                /* create a scheduler entity for each ring */
                for (i = 0; i < adev->num_rings; i++) {
                        struct amd_sched_rq *rq;
                        if (kernel)
                                rq = &adev->rings[i]->sched.kernel_rq;
                        else
                                rq = &adev->rings[i]->sched.sched_rq;
                        r = amd_sched_entity_init(&adev->rings[i]->sched,
                                                  &ctx->rings[i].entity,
                                                  rq, amdgpu_sched_jobs);
                        if (r)
                                break;
                }

                if (i < adev->num_rings) {
                        /* unwind the entities created so far; the caller
                         * owns @ctx, so freeing it is left to the caller
                         */
                        for (j = 0; j < i; j++)
                                amd_sched_entity_fini(&adev->rings[j]->sched,
                                                      &ctx->rings[j].entity);
                        return r;
                }
        }
        return 0;
}

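/**
 * amdgpu_ctx_fini - tear down a GPU context
 * @ctx: context to clean up
 *
 * Counterpart of amdgpu_ctx_init().  Drops all pending fence references
 * and, when the GPU scheduler is enabled, destroys the per-ring scheduler
 * entities.  Does not free @ctx itself.
 */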
void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
        struct amdgpu_device *adev = ctx->adev;
        unsigned i, j;

        if (!adev)
                return;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
                for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
                        fence_put(ctx->rings[i].fences[j]);

        if (amdgpu_enable_scheduler) {
                for (i = 0; i < adev->num_rings; i++)
                        amd_sched_entity_fini(&adev->rings[i]->sched,
                                              &ctx->rings[i].entity);
        }
}

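/**
 * amdgpu_ctx_alloc - allocate a context on behalf of user space
 * @adev: amdgpu device
 * @fpriv: file private the context is created for
 * @id: resulting context handle
 *
 * Allocates a new context, publishes it in the per-file IDR and
 * initializes it.  Handles start at 1, leaving 0 free as an invalid
 * value.
 */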
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
                            uint32_t *id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;
        int r;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        mutex_lock(&mgr->lock);
        r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
        if (r < 0) {
                mutex_unlock(&mgr->lock);
                kfree(ctx);
                return r;
        }
        *id = (uint32_t)r;
        r = amdgpu_ctx_init(adev, false, ctx);
        if (r) {
                /* don't leave a stale handle pointing at freed memory */
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
                kfree(ctx);
        }
        mutex_unlock(&mgr->lock);

        return r;
}

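/*
 * Final release callback for the context kref; tears the context down
 * and frees its memory.
 */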
static void amdgpu_ctx_do_release(struct kref *ref)
{
        struct amdgpu_ctx *ctx;

        ctx = container_of(ref, struct amdgpu_ctx, refcount);

        amdgpu_ctx_fini(ctx);

        kfree(ctx);
}

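/**
 * amdgpu_ctx_free - release the handle's reference on a context
 * @fpriv: file private the context belongs to
 * @id: context handle to free
 *
 * Removes the handle from the IDR and drops the reference it held.  The
 * context itself stays alive until the last reference is gone.
 */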
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
        struct amdgpu_ctx *ctx;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx) {
                idr_remove(&mgr->ctx_handles, id);
                kref_put(&ctx->refcount, amdgpu_ctx_do_release);
                mutex_unlock(&mgr->lock);
                return 0;
        }
        mutex_unlock(&mgr->lock);
        return -EINVAL;
}

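/**
 * amdgpu_ctx_query - report context state to user space
 * @adev: amdgpu device
 * @fpriv: file private owning the context
 * @id: context handle to query
 * @out: state returned to user space
 *
 * Compares the context's reset counter with the device's global one to
 * tell user space whether a GPU reset has happened since the last query.
 */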
static int amdgpu_ctx_query(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv, uint32_t id,
                            union drm_amdgpu_ctx_out *out)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
        unsigned reset_counter;

        if (!fpriv)
                return -EINVAL;

        mgr = &fpriv->ctx_mgr;
        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (!ctx) {
                mutex_unlock(&mgr->lock);
                return -EINVAL;
        }

        /* TODO: these two are always zero */
        out->state.flags = 0x0;
        out->state.hangs = 0x0;

        /* determine if a GPU reset has occurred since the last call */
        reset_counter = atomic_read(&adev->gpu_reset_counter);
        /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
        if (ctx->reset_counter == reset_counter)
                out->state.reset_status = AMDGPU_CTX_NO_RESET;
        else
                out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
        ctx->reset_counter = reset_counter;

        mutex_unlock(&mgr->lock);
        return 0;
}

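/**
 * amdgpu_ctx_ioctl - handle the DRM_AMDGPU_CTX ioctl
 * @dev: DRM device
 * @data: ioctl argument, a union drm_amdgpu_ctx
 * @filp: DRM file the request came from
 *
 * Dispatches the context alloc, free and query-state operations.
 */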
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp)
{
        int r;
        uint32_t id;

        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        r = 0;
        id = args->in.ctx_id;

        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
                r = amdgpu_ctx_alloc(adev, fpriv, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
                r = amdgpu_ctx_free(fpriv, id);
                break;
        case AMDGPU_CTX_OP_QUERY_STATE:
                r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
                break;
        default:
                return -EINVAL;
        }

        return r;
}

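/**
 * amdgpu_ctx_get - look up a context and take a reference
 * @fpriv: file private owning the context
 * @id: context handle
 *
 * Returns the context with an extra reference held, or NULL if the
 * handle is invalid.  The caller must balance this with a call to
 * amdgpu_ctx_put().
 */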
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;

        if (!fpriv)
                return NULL;

        mgr = &fpriv->ctx_mgr;

        mutex_lock(&mgr->lock);
        ctx = idr_find(&mgr->ctx_handles, id);
        if (ctx)
                kref_get(&ctx->refcount);
        mutex_unlock(&mgr->lock);
        return ctx;
}

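/**
 * amdgpu_ctx_put - drop a reference taken with amdgpu_ctx_get()
 * @ctx: the context
 *
 * Returns 0, or -EINVAL if @ctx is NULL.
 */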
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
        if (ctx == NULL)
                return -EINVAL;

        kref_put(&ctx->refcount, amdgpu_ctx_do_release);
        return 0;
}

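/**
 * amdgpu_ctx_add_fence - remember a submission's fence in the context
 * @ctx: context the submission was made on
 * @ring: ring the job runs on
 * @fence: fence of the new submission
 *
 * Stores @fence in the ring's fixed-size fence window and returns the
 * sequence number user space can later pass to amdgpu_ctx_get_fence().
 * If the target slot is still occupied, the oldest fence is waited on
 * first, so at most AMDGPU_CTX_MAX_CS_PENDING submissions are in flight
 * per ring.
 */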
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                              struct fence *fence)
{
        struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
        uint64_t seq = cring->sequence;
        unsigned idx = 0;
        struct fence *other = NULL;

        idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
        other = cring->fences[idx];
        if (other) {
                signed long r;
                /* the slot is still in use; wait for the oldest
                 * submission to finish before recycling it
                 */
                r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
                if (r < 0)
                        DRM_ERROR("Error (%ld) waiting for fence!\n", r);
        }

        fence_get(fence);

        spin_lock(&ctx->ring_lock);
        cring->fences[idx] = fence;
        cring->sequence++;
        spin_unlock(&ctx->ring_lock);

        fence_put(other);

        return seq;
}

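/**
 * amdgpu_ctx_get_fence - look up the fence of a previous submission
 * @ctx: context the submission was made on
 * @ring: ring the job was submitted to
 * @seq: sequence number returned by amdgpu_ctx_add_fence()
 *
 * Returns a new fence reference, NULL if the fence has already left the
 * window and is therefore known to be signaled, or ERR_PTR(-EINVAL) if
 * @seq has not been emitted yet.
 */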
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                   struct amdgpu_ring *ring, uint64_t seq)
{
        struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
        struct fence *fence;

        spin_lock(&ctx->ring_lock);

        if (seq >= cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }

        if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }

        fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
        spin_unlock(&ctx->ring_lock);

        return fence;
}

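/**
 * amdgpu_ctx_mgr_init - initialize a per-file context manager
 * @mgr: manager embedded in the file private
 */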
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
        mutex_init(&mgr->lock);
        idr_init(&mgr->ctx_handles);
}

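/**
 * amdgpu_ctx_mgr_fini - destroy a per-file context manager
 * @mgr: manager embedded in the file private
 *
 * Drops the handle reference on every remaining context, warning about
 * any that are still alive afterwards, then destroys the IDR and the
 * lock.
 */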
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id;

        idp = &mgr->ctx_handles;

        idr_for_each_entry(idp, ctx, id) {
                if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
                        DRM_ERROR("ctx %p is still alive\n", ctx);
        }

        idr_destroy(&mgr->ctx_handles);
        mutex_destroy(&mgr->lock);
}