linux/drivers/gpu/drm/virtio/virtgpu_ioctl.c
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

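/*
 * Create the per-file host rendering context on first use.  The context
 * id itself is allocated when the DRM file is opened; here we only send
 * the context-create command (tagged with the task name for debugging),
 * exactly once per drm_file.
 */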
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        char dbgname[TASK_COMM_LEN];

        mutex_lock(&vfpriv->context_lock);
        if (vfpriv->context_created)
                goto out_unlock;

        get_task_comm(dbgname, current);
        virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
                                      strlen(dbgname), dbgname);
        vfpriv->context_created = true;

out_unlock:
        mutex_unlock(&vfpriv->context_lock);
}

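/*
 * Thin wrapper around the dumb-buffer mmap helper: look up the GEM
 * handle and hand back the fake mmap offset that userspace passes to
 * mmap() on the DRM fd.
 */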
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_map *virtio_gpu_map = data;

        return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
                                         virtio_gpu_map->handle,
                                         &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
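
/*
 * A minimal userspace sketch of this ioctl (illustrative only; error
 * handling is omitted and the contents of cmds/handles are assumptions,
 * since real command streams come from a virgl-aware renderer):
 *
 *        struct drm_virtgpu_execbuffer eb = {
 *                .flags          = VIRTGPU_EXECBUF_FENCE_FD_OUT,
 *                .size           = cmd_size,
 *                .command        = (uintptr_t)cmds,
 *                .bo_handles     = (uintptr_t)handles,
 *                .num_bo_handles = nr_handles,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
 *        // on success eb.fence_fd holds a sync_file fd for this job
 */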
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file)
{
        struct drm_virtgpu_execbuffer *exbuf = data;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct virtio_gpu_fence *out_fence;
        int ret;
        uint32_t *bo_handles = NULL;
        void __user *user_bo_handles = NULL;
        struct virtio_gpu_object_array *buflist = NULL;
        struct sync_file *sync_file;
        int in_fence_fd = exbuf->fence_fd;
        int out_fence_fd = -1;
        void *buf;

        if (!vgdev->has_virgl_3d)
                return -ENOSYS;

        if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
                return -EINVAL;

        exbuf->fence_fd = -1;

        virtio_gpu_create_context(dev, file);
        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
                struct dma_fence *in_fence;

                in_fence = sync_file_get_fence(in_fence_fd);

                if (!in_fence)
                        return -EINVAL;

                /*
                 * Wait if the fence is from a foreign context, or if the fence
                 * array contains any fence from a foreign context.
                 */
                ret = 0;
                if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
                        ret = dma_fence_wait(in_fence, true);

                dma_fence_put(in_fence);
                if (ret)
                        return ret;
        }

        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0)
                        return out_fence_fd;
        }

        if (exbuf->num_bo_handles) {
                bo_handles = kvmalloc_array(exbuf->num_bo_handles,
                                            sizeof(uint32_t), GFP_KERNEL);
                if (!bo_handles) {
                        ret = -ENOMEM;
                        goto out_unused_fd;
                }

                user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
                if (copy_from_user(bo_handles, user_bo_handles,
                                   exbuf->num_bo_handles * sizeof(uint32_t))) {
                        ret = -EFAULT;
                        goto out_unused_fd;
                }

                buflist = virtio_gpu_array_from_handles(file, bo_handles,
                                                        exbuf->num_bo_handles);
                if (!buflist) {
                        ret = -ENOENT;
                        goto out_unused_fd;
                }
                kvfree(bo_handles);
                bo_handles = NULL;
        }

        buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto out_unused_fd;
        }

        if (buflist) {
                ret = virtio_gpu_array_lock_resv(buflist);
                if (ret)
                        goto out_memdup;
        }

        out_fence = virtio_gpu_fence_alloc(vgdev);
        if (!out_fence) {
                ret = -ENOMEM;
                goto out_unresv;
        }

        if (out_fence_fd >= 0) {
                sync_file = sync_file_create(&out_fence->f);
                if (!sync_file) {
                        dma_fence_put(&out_fence->f);
                        ret = -ENOMEM;
                        goto out_unresv;
                }

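                /*
                 * Publish the fd last: once fd_install() has run,
                 * userspace owns the fd, so no failure path may execute
                 * past this point.
                 */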
                exbuf->fence_fd = out_fence_fd;
                fd_install(out_fence_fd, sync_file->file);
        }

        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
                              vfpriv->ctx_id, buflist, out_fence);
        virtio_gpu_notify(vgdev);
        return 0;

out_unresv:
        if (buflist)
                virtio_gpu_array_unlock_resv(buflist);
out_memdup:
        kvfree(buf);
out_unused_fd:
        kvfree(bo_handles);
        if (buflist)
                virtio_gpu_array_put_free(buflist);

        if (out_fence_fd >= 0)
                put_unused_fd(out_fence_fd);

        return ret;
}

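/*
 * Report a single driver capability to userspace; currently that is
 * only whether virgl 3D support is available and whether the capset
 * query fix is present.
 */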
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_getparam *param = data;
        int value;

        switch (param->param) {
        case VIRTGPU_PARAM_3D_FEATURES:
                value = vgdev->has_virgl_3d ? 1 : 0;
                break;
        case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
                value = 1;
                break;
        default:
                return -EINVAL;
        }
        if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
                return -EFAULT;

        return 0;
}

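/*
 * Create a host resource and the guest-side GEM object that backs it.
 * With virgl 3D the full set of texture parameters is passed through to
 * the host; without it the request is restricted to simple 2D resources.
 */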
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                                            struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_resource_create *rc = data;
        struct virtio_gpu_fence *fence;
        int ret;
        struct virtio_gpu_object *qobj;
        struct drm_gem_object *obj;
        uint32_t handle = 0;
        struct virtio_gpu_object_params params = { 0 };

        if (vgdev->has_virgl_3d) {
                virtio_gpu_create_context(dev, file);
                params.virgl = true;
                params.target = rc->target;
                params.bind = rc->bind;
                params.depth = rc->depth;
                params.array_size = rc->array_size;
                params.last_level = rc->last_level;
                params.nr_samples = rc->nr_samples;
                params.flags = rc->flags;
        } else {
                if (rc->depth > 1)
                        return -EINVAL;
                if (rc->nr_samples > 1)
                        return -EINVAL;
                if (rc->last_level > 1)
                        return -EINVAL;
                if (rc->target != 2)
                        return -EINVAL;
                if (rc->array_size > 1)
                        return -EINVAL;
        }

        params.format = rc->format;
        params.width = rc->width;
        params.height = rc->height;
        params.size = rc->size;
        /* allocate a single page size object */
        if (params.size == 0)
                params.size = PAGE_SIZE;

        fence = virtio_gpu_fence_alloc(vgdev);
        if (!fence)
                return -ENOMEM;
        ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
        dma_fence_put(&fence->f);
        if (ret < 0)
                return ret;
        obj = &qobj->base.base;

        ret = drm_gem_handle_create(file, obj, &handle);
        if (ret) {
                drm_gem_object_release(obj);
                return ret;
        }
        drm_gem_object_put_unlocked(obj);

        rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
        rc->bo_handle = handle;
        return 0;
}

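/*
 * Look up a GEM handle and report the object's size and host resource
 * id back to userspace.
 */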
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file)
{
        struct drm_virtgpu_resource_info *ri = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;

        gobj = drm_gem_object_lookup(file, ri->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ri->size = qobj->base.base.size;
        ri->res_handle = qobj->hw_res_handle;
        drm_gem_object_put_unlocked(gobj);
        return 0;
}

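/*
 * Read data back from a host resource into the guest BO (virgl only).
 * The BO's reservation is locked here; the fenced submission path is
 * expected to attach the fence and unlock it once the command is queued.
 */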
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                                               void *data,
                                               struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_from_host *args = data;
        struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *fence;
        int ret;
        u32 offset = args->offset;

        if (!vgdev->has_virgl_3d)
                return -ENOSYS;

        virtio_gpu_create_context(dev, file);
        objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
        if (objs == NULL)
                return -ENOENT;

        ret = virtio_gpu_array_lock_resv(objs);
        if (ret != 0)
                goto err_put_free;

        fence = virtio_gpu_fence_alloc(vgdev);
        if (!fence) {
                ret = -ENOMEM;
                goto err_unlock;
        }
        virtio_gpu_cmd_transfer_from_host_3d
                (vgdev, vfpriv->ctx_id, offset, args->level,
                 &args->box, objs, fence);
        dma_fence_put(&fence->f);
        virtio_gpu_notify(vgdev);
        return 0;

err_unlock:
        virtio_gpu_array_unlock_resv(objs);
err_put_free:
        virtio_gpu_array_put_free(objs);
        return ret;
}

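/*
 * Flush guest memory into the host resource.  On non-virgl devices this
 * is a plain, unfenced 2D transfer; with virgl it becomes a fenced 3D
 * transfer within the file's rendering context.
 */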
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                                             struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
        struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *fence;
        int ret;
        u32 offset = args->offset;

        objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
        if (objs == NULL)
                return -ENOENT;

        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, offset,
                         args->box.w, args->box.h, args->box.x, args->box.y,
                         objs, NULL);
        } else {
                virtio_gpu_create_context(dev, file);
                ret = virtio_gpu_array_lock_resv(objs);
                if (ret != 0)
                        goto err_put_free;

                fence = virtio_gpu_fence_alloc(vgdev);
                if (!fence) {
                        ret = -ENOMEM;
                        goto err_unlock;
                }

                virtio_gpu_cmd_transfer_to_host_3d
                        (vgdev,
                         vfpriv ? vfpriv->ctx_id : 0, offset,
                         args->level, &args->box, objs, fence);
                dma_fence_put(&fence->f);
        }
        virtio_gpu_notify(vgdev);
        return 0;

err_unlock:
        virtio_gpu_array_unlock_resv(objs);
err_put_free:
        virtio_gpu_array_put_free(objs);
        return ret;
}

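/*
 * Block until all pending rendering on a BO has completed (bounded by a
 * fixed 15 second timeout), or merely poll its reservation object when
 * VIRTGPU_WAIT_NOWAIT is set.
 */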
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct drm_virtgpu_3d_wait *args = data;
        struct drm_gem_object *obj;
        long timeout = 15 * HZ;
        int ret;

        obj = drm_gem_object_lookup(file, args->handle);
        if (obj == NULL)
                return -ENOENT;

        if (args->flags & VIRTGPU_WAIT_NOWAIT) {
                ret = dma_resv_test_signaled_rcu(obj->resv, true);
        } else {
                ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
                                                timeout);
        }
        if (ret == 0)
                ret = -EBUSY;
        else if (ret > 0)
                ret = 0;

        drm_gem_object_put_unlocked(obj);
        return ret;
}

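/*
 * Return a capability set to userspace, served from the cache when
 * possible and otherwise fetched from the host first.  The copy is
 * capped at min(guest buffer size, host capset size).
 */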
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
                                     void *data, struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_get_caps *args = data;
        unsigned int size, host_caps_size;
        int i;
        int found_valid = -1;
        int ret;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *ptr;

        if (vgdev->num_capsets == 0)
                return -ENOSYS;

        /* don't allow userspace to pass 0 */
        if (args->size == 0)
                return -EINVAL;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_capsets; i++) {
                if (vgdev->capsets[i].id == args->cap_set_id) {
                        if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
                                found_valid = i;
                                break;
                        }
                }
        }

        if (found_valid == -1) {
                spin_unlock(&vgdev->display_info_lock);
                return -EINVAL;
        }

        host_caps_size = vgdev->capsets[found_valid].max_size;
        /* only copy to user the minimum of the host caps size or the guest caps size */
        size = min(args->size, host_caps_size);

        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->id == args->cap_set_id &&
                    cache_ent->version == args->cap_set_ver) {
                        spin_unlock(&vgdev->display_info_lock);
                        goto copy_exit;
                }
        }
        spin_unlock(&vgdev->display_info_lock);

        /* not in cache - need to talk to hw */
        virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
                                  &cache_ent);
        virtio_gpu_notify(vgdev);

copy_exit:
        ret = wait_event_timeout(vgdev->resp_wq,
                                 atomic_read(&cache_ent->is_valid), 5 * HZ);
        if (!ret)
                return -EBUSY;

        /* is_valid check must precede the copy of the cache entry. */
        smp_rmb();

        ptr = cache_ent->caps_cache;

        if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
                return -EFAULT;

        return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
        DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
                          virtio_gpu_resource_create_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
                          DRM_RENDER_ALLOW),

        /* make transfer async to the main ring? - not sure, can we
         * thread these in the underlying GL
         */
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
                          virtio_gpu_transfer_from_host_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
                          virtio_gpu_transfer_to_host_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
                          DRM_RENDER_ALLOW),
};