linux/drivers/gpu/drm/virtio/virtgpu_prime.c
/*
 * Copyright 2014 Canonical
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andreas Pokorny
 */

#include <drm/drm_prime.h>
#include <linux/virtio_dma_buf.h>

#include "virtgpu_drv.h"

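/*
 * dma-buf get_uuid callback: block until the host has answered the
 * RESOURCE_ASSIGN_UUID command for this object, then report the UUID that
 * identifies the resource across virtio devices.  Returns -ENODEV if no
 * UUID was (or could be) assigned.
 */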
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
                                   uuid_t *uuid)
{
        struct drm_gem_object *obj = buf->priv;
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct virtio_gpu_device *vgdev = obj->dev->dev_private;

        wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
        if (bo->uuid_state != STATE_OK)
                return -ENODEV;

        uuid_copy(uuid, &bo->uuid);

        return 0;
}

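/*
 * Standard drm_gem dma-buf plumbing, wrapped in struct virtio_dma_buf_ops
 * so that importers can query the exporter-assigned UUID through
 * virtio_dma_buf_get_uuid().
 */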
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
        .ops = {
                .cache_sgt_mapping = true,
                .attach = virtio_dma_buf_attach,
                .detach = drm_gem_map_detach,
                .map_dma_buf = drm_gem_map_dma_buf,
                .unmap_dma_buf = drm_gem_unmap_dma_buf,
                .release = drm_gem_dmabuf_release,
                .mmap = drm_gem_dmabuf_mmap,
                .vmap = drm_gem_dmabuf_vmap,
                .vunmap = drm_gem_dmabuf_vunmap,
        },
        .device_attach = drm_gem_map_attach,
        .get_uuid = virtgpu_virtio_get_uuid,
};

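/*
 * Ask the host to attach a UUID to @bo's resource.  The command is only
 * queued here; once the response is processed, bo->uuid_state leaves
 * STATE_INITIALIZING and waiters on vgdev->resp_wq (see
 * virtgpu_virtio_get_uuid() above) are woken.
 */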
int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo)
{
        struct virtio_gpu_object_array *objs;

        objs = virtio_gpu_array_alloc(1);
        if (!objs)
                return -ENOMEM;

        virtio_gpu_array_add_obj(objs, &bo->base.base);

        return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
}

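/*
 * Export a virtio-gpu GEM object as a dma-buf.  Non-blob resources get a
 * UUID requested from the host (when the feature is available) so other
 * virtio devices can identify the buffer; blob resources are expected to
 * carry a UUID only when created with VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE.
 * In all other cases uuid_state is marked STATE_ERR, making the get_uuid
 * callback fail with -ENODEV.
 */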
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
                                         int flags)
{
        struct dma_buf *buf;
        struct drm_device *dev = obj->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        int ret = 0;
        bool blob = bo->host3d_blob || bo->guest_blob;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        if (!blob) {
                if (vgdev->has_resource_assign_uuid) {
                        ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
                        if (ret)
                                return ERR_PTR(ret);

                        virtio_gpu_notify(vgdev);
                } else {
                        bo->uuid_state = STATE_ERR;
                }
        } else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
                bo->uuid_state = STATE_ERR;
        }

        exp_info.ops = &virtgpu_dmabuf_ops.ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;
        exp_info.resv = obj->resv;

        buf = virtio_dma_buf_export(&exp_info);
        if (IS_ERR(buf))
                return buf;

        drm_dev_get(dev);
        drm_gem_object_get(obj);

        return buf;
}

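/*
 * Import a dma-buf.  A buffer that this device exported itself is resolved
 * back to the underlying GEM object with just a reference bump; anything
 * else goes through the generic PRIME import path.
 */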
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
                                                struct dma_buf *buf)
{
        struct drm_gem_object *obj;

        if (buf->ops == &virtgpu_dmabuf_ops.ops) {
                obj = buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dma-buf exported from our own GEM
                         * increases the refcount on the GEM itself instead
                         * of the dma-buf's f_count.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

        return drm_gem_prime_import(dev, buf);
}

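/*
 * virtio-gpu cannot map an arbitrary imported scatter list into a host
 * resource, so cross-driver imports that reach this point are rejected
 * with -ENODEV.
 */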
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
        struct drm_device *dev, struct dma_buf_attachment *attach,
        struct sg_table *table)
{
        return ERR_PTR(-ENODEV);
}

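/*
 * A minimal sketch of how these entry points are typically wired up
 * elsewhere in the driver (field names per struct drm_gem_object_funcs and
 * struct drm_driver; the actual hook-up lives outside this file, e.g. in
 * virtgpu_object.c and virtgpu_drv.c):
 *
 *      static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
 *              ...
 *              .export = virtgpu_gem_prime_export,
 *      };
 *
 *      static const struct drm_driver driver = {
 *              ...
 *              .gem_prime_import = virtgpu_gem_prime_import,
 *              .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
 *      };
 */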