linux/drivers/gpu/drm/msm/msm_submitqueue.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include "msm_gpu.h"

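/*
 * kref release callback for a submitqueue: runs once the last reference
 * is dropped and frees the queue.
 */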
void msm_submitqueue_destroy(struct kref *kref)
{
        struct msm_gpu_submitqueue *queue = container_of(kref,
                struct msm_gpu_submitqueue, ref);

        kfree(queue);
}

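/*
 * Look up a submitqueue by id under the read lock. On success a reference
 * is taken on the queue; the caller must drop it with msm_submitqueue_put().
 */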
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
                u32 id)
{
        struct msm_gpu_submitqueue *entry;

        if (!ctx)
                return NULL;

        read_lock(&ctx->queuelock);

        list_for_each_entry(entry, &ctx->submitqueues, node) {
                if (entry->id == id) {
                        kref_get(&entry->ref);
                        read_unlock(&ctx->queuelock);

                        return entry;
                }
        }

        read_unlock(&ctx->queuelock);
        return NULL;
}

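/*
 * Called on file close: drop the context's reference on each remaining
 * queue, so a queue is freed once any other holders release theirs.
 */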
void msm_submitqueue_close(struct msm_file_private *ctx)
{
        struct msm_gpu_submitqueue *entry, *tmp;

        if (!ctx)
                return;

        /*
         * No lock needed in close and there won't
         * be any more user ioctls coming our way
         */
        list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
                msm_submitqueue_put(entry);
}

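/*
 * Allocate a new submitqueue for the context: validate the requested
 * priority against the number of GPU rings, assign the next queue id from
 * the context, and add the queue to the context's list under the write lock.
 */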
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
                u32 prio, u32 flags, u32 *id)
{
        struct msm_drm_private *priv = drm->dev_private;
        struct msm_gpu_submitqueue *queue;

        if (!ctx)
                return -ENODEV;

        queue = kzalloc(sizeof(*queue), GFP_KERNEL);

        if (!queue)
                return -ENOMEM;

        kref_init(&queue->ref);
        queue->flags = flags;

        if (priv->gpu) {
                if (prio >= priv->gpu->nr_rings) {
                        /* Don't leak the queue on an invalid priority */
                        kfree(queue);
                        return -EINVAL;
                }

                queue->prio = prio;
        }

        write_lock(&ctx->queuelock);

        queue->id = ctx->queueid++;

        if (id)
                *id = queue->id;

        list_add_tail(&queue->node, &ctx->submitqueues);

        write_unlock(&ctx->queuelock);

        return 0;
}

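/*
 * Set up the per-file submitqueue state (list and rwlock) and create the
 * default queue (id 0) at the default priority.
 */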
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
        struct msm_drm_private *priv = drm->dev_private;
        int default_prio;

        if (!ctx)
                return 0;

        /*
         * Select priority 2 as the "default priority" unless nr_rings is
         * less than 2, in which case pick the lowest priority.
         */
        default_prio = priv->gpu ?
                clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;

        INIT_LIST_HEAD(&ctx->submitqueues);

        rwlock_init(&ctx->queuelock);

        return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

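/*
 * Report the queue's fault count. A zero-length query just returns the size
 * userspace should pass; otherwise copy out at most that many bytes.
 */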
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
                struct drm_msm_submitqueue_query *args)
{
        size_t size = min_t(size_t, args->len, sizeof(queue->faults));
        int ret;

        /* If a zero length was passed in, return the data size we expect */
        if (!args->len) {
                args->len = sizeof(queue->faults);
                return 0;
        }

        /* Set the length to the actual size of the data */
        args->len = size;

        ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

        return ret ? -EFAULT : 0;
}

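/*
 * Handle a submitqueue query: look up the queue, dispatch on the requested
 * param (currently only MSM_SUBMITQUEUE_PARAM_FAULTS), then drop the
 * lookup reference.
 */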
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
                struct drm_msm_submitqueue_query *args)
{
        struct msm_gpu_submitqueue *queue;
        int ret = -EINVAL;

        if (args->pad)
                return -EINVAL;

        queue = msm_submitqueue_get(ctx, args->id);
        if (!queue)
                return -ENOENT;

        if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
                ret = msm_submitqueue_query_faults(queue, args);

        msm_submitqueue_put(queue);

        return ret;
}

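/*
 * Remove a queue from the context. The default queue (id 0) cannot be
 * removed; for any other id, unlink it under the write lock and drop the
 * list's reference.
 */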
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
        struct msm_gpu_submitqueue *entry;

        if (!ctx)
                return 0;

        /*
         * id 0 is the "default" queue and can't be destroyed
         * by the user
         */
        if (!id)
                return -ENOENT;

        write_lock(&ctx->queuelock);

        list_for_each_entry(entry, &ctx->submitqueues, node) {
                if (entry->id == id) {
                        list_del(&entry->node);
                        write_unlock(&ctx->queuelock);

                        msm_submitqueue_put(entry);
                        return 0;
                }
        }

        write_unlock(&ctx->queuelock);
        return -ENOENT;
}