linux/drivers/gpu/drm/virtio/virtgpu_ioctl.c
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
                                    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
                                    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
/*
 * Create the host-side rendering context for this DRM file once; later
 * calls for the same file are a no-op.
 */
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        char dbgname[TASK_COMM_LEN];

        mutex_lock(&vfpriv->context_lock);
        if (vfpriv->context_created)
                goto out_unlock;

        get_task_comm(dbgname, current);
        virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
                                      strlen(dbgname), dbgname);
        vfpriv->context_created = true;

out_unlock:
        mutex_unlock(&vfpriv->context_lock);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_map *virtio_gpu_map = data;

        return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
                                         virtio_gpu_map->handle,
                                         &virtio_gpu_map->offset);
}
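
/*
 * Userspace sketch (illustrative only, not part of this file): resolve the
 * fake mmap offset for a BO handle, then mmap() it through the DRM fd.
 * Assumes an open virtio-gpu render node `fd`, a BO handle `bo_handle` of
 * size `bo_size`, and libdrm's drmIoctl() wrapper.
 *
 *      struct drm_virtgpu_map req = { .handle = bo_handle };
 *      void *ptr;
 *
 *      if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &req))
 *              err(1, "DRM_IOCTL_VIRTGPU_MAP");
 *      ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, req.offset);
 */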

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct drm_virtgpu_execbuffer *exbuf = data;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct virtio_gpu_fence *out_fence;
        int ret;
        uint32_t *bo_handles = NULL;
        void __user *user_bo_handles = NULL;
        struct virtio_gpu_object_array *buflist = NULL;
        struct sync_file *sync_file;
        int in_fence_fd = exbuf->fence_fd;
        int out_fence_fd = -1;
        void *buf;

        if (!vgdev->has_virgl_3d)
                return -ENOSYS;

        if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
                return -EINVAL;

        exbuf->fence_fd = -1;

        virtio_gpu_create_context(dev, file);
        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
                struct dma_fence *in_fence;

                in_fence = sync_file_get_fence(in_fence_fd);

                if (!in_fence)
                        return -EINVAL;

                /*
                 * Wait if the fence is from a foreign context, or if the fence
                 * array contains any fence from a foreign context.
                 */
                ret = 0;
                if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
                        ret = dma_fence_wait(in_fence, true);

                dma_fence_put(in_fence);
                if (ret)
                        return ret;
        }

        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0)
                        return out_fence_fd;
        }

        if (exbuf->num_bo_handles) {
                bo_handles = kvmalloc_array(exbuf->num_bo_handles,
                                            sizeof(uint32_t), GFP_KERNEL);
                if (!bo_handles) {
                        ret = -ENOMEM;
                        goto out_unused_fd;
                }

                user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
                if (copy_from_user(bo_handles, user_bo_handles,
                                   exbuf->num_bo_handles * sizeof(uint32_t))) {
                        ret = -EFAULT;
                        goto out_unused_fd;
                }

                buflist = virtio_gpu_array_from_handles(file, bo_handles,
                                                        exbuf->num_bo_handles);
                if (!buflist) {
                        ret = -ENOENT;
                        goto out_unused_fd;
                }
                kvfree(bo_handles);
                bo_handles = NULL;
        }

        buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto out_unused_fd;
        }

        if (buflist) {
                ret = virtio_gpu_array_lock_resv(buflist);
                if (ret)
                        goto out_memdup;
        }

        out_fence = virtio_gpu_fence_alloc(vgdev);
        if (!out_fence) {
                ret = -ENOMEM;
                goto out_unresv;
        }

        if (out_fence_fd >= 0) {
                sync_file = sync_file_create(&out_fence->f);
                if (!sync_file) {
                        dma_fence_put(&out_fence->f);
                        ret = -ENOMEM;
                        goto out_unresv;
                }

                exbuf->fence_fd = out_fence_fd;
                fd_install(out_fence_fd, sync_file->file);
        }

        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
                              vfpriv->ctx_id, buflist, out_fence);
        dma_fence_put(&out_fence->f);
        virtio_gpu_notify(vgdev);
        return 0;

out_unresv:
        if (buflist)
                virtio_gpu_array_unlock_resv(buflist);
out_memdup:
        kvfree(buf);
out_unused_fd:
        kvfree(bo_handles);
        if (buflist)
                virtio_gpu_array_put_free(buflist);

        if (out_fence_fd >= 0)
                put_unused_fd(out_fence_fd);

        return ret;
}
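
/*
 * Userspace sketch (illustrative only): submit an encoded command buffer
 * against a set of BOs and get a sync_file fence back. Assumes an open
 * render node `fd`, a command buffer `cmds`/`cmds_size` already encoded for
 * the host renderer, a `bo_handles`/`num_bos` array, and libdrm's drmIoctl().
 *
 *      struct drm_virtgpu_execbuffer eb = {
 *              .flags          = VIRTGPU_EXECBUF_FENCE_FD_OUT,
 *              .size           = cmds_size,
 *              .command        = (uintptr_t)cmds,
 *              .bo_handles     = (uintptr_t)bo_handles,
 *              .num_bo_handles = num_bos,
 *              .fence_fd       = -1,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb))
 *              err(1, "DRM_IOCTL_VIRTGPU_EXECBUFFER");
 *      // eb.fence_fd now holds a pollable sync_file fd for completion.
 */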

static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_getparam *param = data;
        int value;

        switch (param->param) {
        case VIRTGPU_PARAM_3D_FEATURES:
                value = vgdev->has_virgl_3d ? 1 : 0;
                break;
        case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
                value = 1;
                break;
        case VIRTGPU_PARAM_RESOURCE_BLOB:
                value = vgdev->has_resource_blob ? 1 : 0;
                break;
        case VIRTGPU_PARAM_HOST_VISIBLE:
                value = vgdev->has_host_visible ? 1 : 0;
                break;
        case VIRTGPU_PARAM_CROSS_DEVICE:
                value = vgdev->has_resource_assign_uuid ? 1 : 0;
                break;
        default:
                return -EINVAL;
        }
        if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
                return -EFAULT;

        return 0;
}
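
/*
 * Userspace sketch (illustrative only): probe a single capability before
 * relying on it. The kernel writes back a plain int. Assumes an open render
 * node `fd` and libdrm's drmIoctl().
 *
 *      int have_blob = 0;
 *      struct drm_virtgpu_getparam gp = {
 *              .param = VIRTGPU_PARAM_RESOURCE_BLOB,
 *              .value = (uintptr_t)&have_blob,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp))
 *              have_blob = 0;  // unknown params fail with -EINVAL
 */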

static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                                            struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_resource_create *rc = data;
        struct virtio_gpu_fence *fence;
        int ret;
        struct virtio_gpu_object *qobj;
        struct drm_gem_object *obj;
        uint32_t handle = 0;
        struct virtio_gpu_object_params params = { 0 };

        if (vgdev->has_virgl_3d) {
                virtio_gpu_create_context(dev, file);
                params.virgl = true;
                params.target = rc->target;
                params.bind = rc->bind;
                params.depth = rc->depth;
                params.array_size = rc->array_size;
                params.last_level = rc->last_level;
                params.nr_samples = rc->nr_samples;
                params.flags = rc->flags;
        } else {
                if (rc->depth > 1)
                        return -EINVAL;
                if (rc->nr_samples > 1)
                        return -EINVAL;
                if (rc->last_level > 1)
                        return -EINVAL;
                if (rc->target != 2) /* only 2D (PIPE_TEXTURE_2D) without virgl */
                        return -EINVAL;
                if (rc->array_size > 1)
                        return -EINVAL;
        }

        params.format = rc->format;
        params.width = rc->width;
        params.height = rc->height;
        params.size = rc->size;
        /* allocate a single page-sized object */
        if (params.size == 0)
                params.size = PAGE_SIZE;

        fence = virtio_gpu_fence_alloc(vgdev);
        if (!fence)
                return -ENOMEM;
        ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
        dma_fence_put(&fence->f);
        if (ret < 0)
                return ret;
        obj = &qobj->base.base;

        ret = drm_gem_handle_create(file, obj, &handle);
        if (ret) {
                drm_gem_object_release(obj);
                return ret;
        }
        drm_gem_object_put(obj);

        rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
        rc->bo_handle = handle;
        return 0;
}
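
/*
 * Userspace sketch (illustrative only): create a 2D resource that the
 * non-virgl path above accepts (target 2, depth/array_size/nr_samples <= 1).
 * The format value follows the virtio-gpu spec, where
 * VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM is 1. Assumes `fd` and drmIoctl() as
 * above.
 *
 *      struct drm_virtgpu_resource_create rc = {
 *              .target = 2,
 *              .format = 1,            // VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM
 *              .width  = 64,
 *              .height = 64,
 *              .size   = 64 * 64 * 4,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &rc))
 *              err(1, "DRM_IOCTL_VIRTGPU_RESOURCE_CREATE");
 *      // rc.bo_handle is the GEM handle, rc.res_handle the host resource id.
 */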

static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file)
{
        struct drm_virtgpu_resource_info *ri = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;

        gobj = drm_gem_object_lookup(file, ri->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ri->size = qobj->base.base.size;
        ri->res_handle = qobj->hw_res_handle;
        if (qobj->host3d_blob || qobj->guest_blob)
                ri->blob_mem = qobj->blob_mem;

        drm_gem_object_put(gobj);
        return 0;
}
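
/*
 * Userspace sketch (illustrative only): look up the size and host resource
 * id behind a GEM handle, e.g. one imported via PRIME. Assumes `fd`,
 * `bo_handle` and drmIoctl() as above.
 *
 *      struct drm_virtgpu_resource_info info = { .bo_handle = bo_handle };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info))
 *              err(1, "DRM_IOCTL_VIRTGPU_RESOURCE_INFO");
 *      // info.size, info.res_handle and (for blobs) info.blob_mem are valid.
 */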

static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                                               void *data,
                                               struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_from_host *args = data;
        struct virtio_gpu_object *bo;
        struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *fence;
        int ret;
        u32 offset = args->offset;

        if (!vgdev->has_virgl_3d)
                return -ENOSYS;

        virtio_gpu_create_context(dev, file);
        objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
        if (objs == NULL)
                return -ENOENT;

        bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        if (bo->guest_blob && !bo->host3d_blob) {
                ret = -EINVAL;
                goto err_put_free;
        }

        if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
                ret = -EINVAL;
                goto err_put_free;
        }

        ret = virtio_gpu_array_lock_resv(objs);
        if (ret != 0)
                goto err_put_free;

        fence = virtio_gpu_fence_alloc(vgdev);
        if (!fence) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        virtio_gpu_cmd_transfer_from_host_3d
                (vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
                 args->layer_stride, &args->box, objs, fence);
        dma_fence_put(&fence->f);
        virtio_gpu_notify(vgdev);
        return 0;

err_unlock:
        virtio_gpu_array_unlock_resv(objs);
err_put_free:
        virtio_gpu_array_put_free(objs);
        return ret;
}
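
/*
 * Userspace sketch (illustrative only): read back a 64x64 region of level 0
 * of a 3D resource into the guest pages backing it. The box is in texels;
 * stride and layer_stride must stay zero for non-blob resources (the kernel
 * rejects them otherwise). Assumes `fd`, `bo_handle` and drmIoctl() as above.
 *
 *      struct drm_virtgpu_3d_transfer_from_host xfer = {
 *              .bo_handle = bo_handle,
 *              .box = { .x = 0, .y = 0, .z = 0, .w = 64, .h = 64, .d = 1 },
 *              .level = 0,
 *              .offset = 0,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer))
 *              err(1, "DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST");
 */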

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                                             struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
        struct virtio_gpu_object *bo;
        struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *fence;
        int ret;
        u32 offset = args->offset;

        objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
        if (objs == NULL)
                return -ENOENT;

        bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        if (bo->guest_blob && !bo->host3d_blob) {
                ret = -EINVAL;
                goto err_put_free;
        }

        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, offset,
                         args->box.w, args->box.h, args->box.x, args->box.y,
                         objs, NULL);
        } else {
                virtio_gpu_create_context(dev, file);

                if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
                        ret = -EINVAL;
                        goto err_put_free;
                }

                ret = virtio_gpu_array_lock_resv(objs);
                if (ret != 0)
                        goto err_put_free;

                ret = -ENOMEM;
                fence = virtio_gpu_fence_alloc(vgdev);
                if (!fence)
                        goto err_unlock;

                virtio_gpu_cmd_transfer_to_host_3d
                        (vgdev,
                         vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
                         args->stride, args->layer_stride, &args->box, objs,
                         fence);
                dma_fence_put(&fence->f);
        }
        virtio_gpu_notify(vgdev);
        return 0;

err_unlock:
        virtio_gpu_array_unlock_resv(objs);
err_put_free:
        virtio_gpu_array_put_free(objs);
        return ret;
}
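
/*
 * Userspace sketch (illustrative only): flush guest-side writes to a BO out
 * to the host 2D resource (the non-virgl branch above). Assumes `fd`,
 * `bo_handle`, a `width` x `height` resource, and drmIoctl() as above.
 *
 *      struct drm_virtgpu_3d_transfer_to_host xfer = {
 *              .bo_handle = bo_handle,
 *              .box = { .x = 0, .y = 0, .w = width, .h = height, .d = 1 },
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer))
 *              err(1, "DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST");
 */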

static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct drm_virtgpu_3d_wait *args = data;
        struct drm_gem_object *obj;
        long timeout = 15 * HZ;
        int ret;

        obj = drm_gem_object_lookup(file, args->handle);
        if (obj == NULL)
                return -ENOENT;

        if (args->flags & VIRTGPU_WAIT_NOWAIT) {
                ret = dma_resv_test_signaled(obj->resv, true);
        } else {
                ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
        }
        if (ret == 0)
                ret = -EBUSY;
        else if (ret > 0)
                ret = 0;

        drm_gem_object_put(obj);
        return ret;
}
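
/*
 * Userspace sketch (illustrative only): non-blocking busy check followed by
 * a blocking wait. With VIRTGPU_WAIT_NOWAIT the ioctl fails with -EBUSY
 * while work is still pending instead of sleeping. Assumes `fd`,
 * `bo_handle` and drmIoctl() as above.
 *
 *      struct drm_virtgpu_3d_wait wait = {
 *              .handle = bo_handle,
 *              .flags  = VIRTGPU_WAIT_NOWAIT,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait) && errno == EBUSY) {
 *              wait.flags = 0;         // fall back to a blocking wait
 *              drmIoctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait);
 *      }
 */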

static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
                                void *data, struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_get_caps *args = data;
        unsigned size, host_caps_size;
        int i;
        int found_valid = -1;
        int ret;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *ptr;

        if (vgdev->num_capsets == 0)
                return -ENOSYS;

        /* don't allow userspace to pass 0 */
        if (args->size == 0)
                return -EINVAL;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_capsets; i++) {
                if (vgdev->capsets[i].id == args->cap_set_id) {
                        if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
                                found_valid = i;
                                break;
                        }
                }
        }

        if (found_valid == -1) {
                spin_unlock(&vgdev->display_info_lock);
                return -EINVAL;
        }

        host_caps_size = vgdev->capsets[found_valid].max_size;
        /* only copy to user the minimum of the host caps size or the guest caps size */
        size = min(args->size, host_caps_size);

        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->id == args->cap_set_id &&
                    cache_ent->version == args->cap_set_ver) {
                        spin_unlock(&vgdev->display_info_lock);
                        goto copy_exit;
                }
        }
        spin_unlock(&vgdev->display_info_lock);

        /* not in cache - need to talk to hw */
        virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
                                  &cache_ent);
        virtio_gpu_notify(vgdev);

copy_exit:
        ret = wait_event_timeout(vgdev->resp_wq,
                                 atomic_read(&cache_ent->is_valid), 5 * HZ);
        if (!ret)
                return -EBUSY;

        /* is_valid check must precede the copy of the cache entry. */
        smp_rmb();

        ptr = cache_ent->caps_cache;

        if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
                return -EFAULT;

        return 0;
}
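
/*
 * Userspace sketch (illustrative only): fetch a capset blob. The capset id
 * comes from the virtio-gpu spec (VIRTIO_GPU_CAPSET_VIRGL is 1,
 * VIRTIO_GPU_CAPSET_VIRGL2 is 2); the buffer size and requested version here
 * are just assumptions for the sketch, the caller must know suitable values
 * for its renderer. Assumes `fd` and drmIoctl() as above.
 *
 *      uint8_t caps[4096] = { 0 };
 *      struct drm_virtgpu_get_caps gc = {
 *              .cap_set_id  = 1,       // VIRTIO_GPU_CAPSET_VIRGL
 *              .cap_set_ver = 1,
 *              .addr        = (uintptr_t)caps,
 *              .size        = sizeof(caps),
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &gc))
 *              err(1, "DRM_IOCTL_VIRTGPU_GET_CAPS");
 */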

static int verify_blob(struct virtio_gpu_device *vgdev,
                       struct virtio_gpu_fpriv *vfpriv,
                       struct virtio_gpu_object_params *params,
                       struct drm_virtgpu_resource_create_blob *rc_blob,
                       bool *guest_blob, bool *host3d_blob)
{
        if (!vgdev->has_resource_blob)
                return -EINVAL;

        if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
            !rc_blob->blob_flags)
                return -EINVAL;

        if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
                if (!vgdev->has_resource_assign_uuid)
                        return -EINVAL;
        }

        switch (rc_blob->blob_mem) {
        case VIRTGPU_BLOB_MEM_GUEST:
                *guest_blob = true;
                break;
        case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
                *guest_blob = true;
                fallthrough;
        case VIRTGPU_BLOB_MEM_HOST3D:
                *host3d_blob = true;
                break;
        default:
                return -EINVAL;
        }

        if (*host3d_blob) {
                if (!vgdev->has_virgl_3d)
                        return -EINVAL;

                /* Must be dword aligned. */
                if (rc_blob->cmd_size % 4 != 0)
                        return -EINVAL;

                params->ctx_id = vfpriv->ctx_id;
                params->blob_id = rc_blob->blob_id;
        } else {
                if (rc_blob->blob_id != 0)
                        return -EINVAL;

                if (rc_blob->cmd_size != 0)
                        return -EINVAL;
        }

        params->blob_mem = rc_blob->blob_mem;
        params->size = rc_blob->size;
        params->blob = true;
        params->blob_flags = rc_blob->blob_flags;
        return 0;
}
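
/*
 * Summary of the combinations verify_blob() accepts (derived from the checks
 * above, not from the virtio spec):
 *
 *      blob_mem                        guest_blob      host3d_blob
 *      VIRTGPU_BLOB_MEM_GUEST          yes             no
 *      VIRTGPU_BLOB_MEM_HOST3D_GUEST   yes             yes
 *      VIRTGPU_BLOB_MEM_HOST3D         no              yes
 *
 * Host3d blobs additionally require virgl 3D and a dword-aligned cmd_size,
 * and carry ctx_id/blob_id; guest-only blobs must leave blob_id and cmd_size
 * at zero.
 */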

static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
                                                 void *data,
                                                 struct drm_file *file)
{
        int ret = 0;
        uint32_t handle = 0;
        bool guest_blob = false;
        bool host3d_blob = false;
        struct drm_gem_object *obj;
        struct virtio_gpu_object *bo;
        struct virtio_gpu_object_params params = { 0 };
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_resource_create_blob *rc_blob = data;

        if (verify_blob(vgdev, vfpriv, &params, rc_blob,
                        &guest_blob, &host3d_blob))
                return -EINVAL;

        if (vgdev->has_virgl_3d)
                virtio_gpu_create_context(dev, file);

        if (rc_blob->cmd_size) {
                void *buf;

                buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
                                  rc_blob->cmd_size);

                if (IS_ERR(buf))
                        return PTR_ERR(buf);

                virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
                                      vfpriv->ctx_id, NULL, NULL);
        }

        if (guest_blob)
                ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
        else if (!guest_blob && host3d_blob)
                ret = virtio_gpu_vram_create(vgdev, &params, &bo);
        else
                return -EINVAL;

        if (ret < 0)
                return ret;

        bo->guest_blob = guest_blob;
        bo->host3d_blob = host3d_blob;
        bo->blob_mem = rc_blob->blob_mem;
        bo->blob_flags = rc_blob->blob_flags;

        obj = &bo->base.base;
        if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
                ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
                if (ret) {
                        drm_gem_object_release(obj);
                        return ret;
                }
        }

        ret = drm_gem_handle_create(file, obj, &handle);
        if (ret) {
                drm_gem_object_release(obj);
                return ret;
        }
        drm_gem_object_put(obj);

        rc_blob->res_handle = bo->hw_res_handle;
        rc_blob->bo_handle = handle;

        return 0;
}
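
/*
 * Userspace sketch (illustrative only): create a mappable guest blob and map
 * it, a typical path for a shadow/staging buffer. Assumes `fd` and
 * drmIoctl() as above.
 *
 *      struct drm_virtgpu_resource_create_blob blob = {
 *              .blob_mem   = VIRTGPU_BLOB_MEM_GUEST,
 *              .blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
 *              .size       = 64 * 1024,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob))
 *              err(1, "DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB");
 *
 *      struct drm_virtgpu_map map = { .handle = blob.bo_handle };
 *      if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map))
 *              err(1, "DRM_IOCTL_VIRTGPU_MAP");
 *      void *ptr = mmap(NULL, blob.size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, map.offset);
 */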

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
        DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
                          virtio_gpu_resource_create_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
                          DRM_RENDER_ALLOW),

        /* make transfer async to the main ring? - not sure, can we
         * thread these in the underlying GL
         */
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
                          virtio_gpu_transfer_from_host_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
                          virtio_gpu_transfer_to_host_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
                          virtio_gpu_resource_create_blob_ioctl,
                          DRM_RENDER_ALLOW),
};