linux/drivers/gpu/drm/msm/msm_submitqueue.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

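/*
 * A note on lifetimes and locking, summarizing what the code below does:
 * submitqueues are refcounted with a kref and live on a per-drm_file list
 * in msm_file_private, protected by ctx->queuelock.  Each queue holds a
 * reference on its ctx, and the drm_sched entities in ctx->entities are
 * shared by all queues that map to the same ring/priority pair.
 */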
void __msm_file_private_destroy(struct kref *kref)
{
	struct msm_file_private *ctx = container_of(kref,
		struct msm_file_private, ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
		if (!ctx->entities[i])
			continue;

		drm_sched_entity_destroy(ctx->entities[i]);
		kfree(ctx->entities[i]);
	}

	msm_gem_address_space_put(ctx->aspace);
	kfree(ctx);
}

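/*
 * Called via msm_submitqueue_put() when the last reference is dropped:
 * tear down the fence idr and release the queue's reference on its ctx.
 */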
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	idr_destroy(&queue->fence_idr);

	msm_file_private_put(queue->ctx);

	kfree(queue);
}

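/*
 * Look up a queue by id.  An extra reference is taken under the read lock,
 * so the queue cannot disappear between lookup and use; the caller is
 * responsible for dropping it with msm_submitqueue_put().
 */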
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
		list_del(&entry->node);
		msm_submitqueue_put(entry);
	}
}

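/*
 * Scheduler entities are created lazily, the first time a queue needs a
 * given ring/priority combination, and are then shared by every queue in
 * this ctx that maps to the same pair.  Creation is not a fast path, so a
 * single static mutex serializing it is sufficient.  The index into
 * ctx->entities[] is ring-major: e.g. assuming NR_SCHED_PRIORITIES == 3,
 * ring 2 at sched priority 1 lands at idx 2 * 3 + 1 == 7.
 */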
static struct drm_sched_entity *
get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
		 unsigned ring_nr, enum drm_sched_priority sched_prio)
{
	static DEFINE_MUTEX(entity_lock);
	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

	/* We should have already validated that the requested priority is
	 * valid by the time we get here.
	 */
	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&entity_lock);

	if (!ctx->entities[idx]) {
		struct drm_sched_entity *entity;
		struct drm_gpu_scheduler *sched = &ring->sched;
		int ret;

		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
		if (!entity) {
			mutex_unlock(&entity_lock);
			return ERR_PTR(-ENOMEM);
		}

		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
		if (ret) {
			mutex_unlock(&entity_lock);
			kfree(entity);
			return ERR_PTR(ret);
		}

		ctx->entities[idx] = entity;
	}

	mutex_unlock(&entity_lock);

	return ctx->entities[idx];
}

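/*
 * Note that msm_gpu_convert_priority() splits the single userspace priority
 * into a (ring_nr, sched_prio) pair: which ringbuffer to submit to, and the
 * drm_sched priority level within that ring.
 */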
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
	enum drm_sched_priority sched_prio;
	unsigned ring_nr;
	int ret;

	if (!ctx)
		return -ENODEV;

	if (!priv->gpu)
		return -ENODEV;

	ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
	if (ret)
		return ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;
	queue->ring_nr = ring_nr;

	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
					 ring_nr, sched_prio);
	if (IS_ERR(queue->entity)) {
		ret = PTR_ERR(queue->entity);
		kfree(queue);
		return ret;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	idr_init(&queue->fence_idr);
	mutex_init(&queue->lock);

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

/*
 * Create the default submit-queue (id==0), used for backwards compatibility
 * for userspace that pre-dates the introduction of submitqueues.
 */
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio, max_priority;

	if (!priv->gpu)
		return -ENODEV;

	max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;

	/*
	 * Pick a medium priority level as default.  Lower numeric value is
	 * higher priority, so round-up to pick a priority that is not higher
	 * than the middle priority level.
	 */
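	/*
	 * Worked example, assuming a GPU with nr_rings == 4 and
	 * NR_SCHED_PRIORITIES == 3: max_priority == 11, and default_prio ==
	 * DIV_ROUND_UP(11, 2) == 6, which rounds away from 0 and therefore
	 * away from the higher priority levels.
	 */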
	default_prio = DIV_ROUND_UP(max_priority, 2);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

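/*
 * The fault query follows the common two-step ioctl pattern: userspace first
 * calls with args->len == 0 to discover the required buffer size, then calls
 * again with a buffer of (at least) that size to fetch the data.
 */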
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

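/*
 * Removing a queue only drops the list's reference; the queue itself stays
 * alive until the last msm_submitqueue_put(), e.g. while submits that hold
 * a reference to it are still in flight.
 */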
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}