1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <drm/drm_prime.h>
26#include <linux/virtio_dma_buf.h>
27
28#include "virtgpu_drv.h"
29
30static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
31 uuid_t *uuid)
32{
33 struct drm_gem_object *obj = buf->priv;
34 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
35 struct virtio_gpu_device *vgdev = obj->dev->dev_private;
36
37 wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
38 if (bo->uuid_state != STATE_OK)
39 return -ENODEV;
40
41 uuid_copy(uuid, &bo->uuid);
42
43 return 0;
44}
45
/*
 * dma-buf ops for buffers exported by virtio-gpu.  The generic DRM GEM
 * PRIME helpers back most callbacks; the virtio-specific layer adds
 * attach interception and the get_uuid hook used for cross-device
 * (host-visible) buffer sharing.
 */
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
	.ops = {
		/* Cache the sg_table across map/unmap calls. */
		.cache_sgt_mapping = true,
		.attach = virtio_dma_buf_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = drm_gem_map_dma_buf,
		.unmap_dma_buf = drm_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		.mmap = drm_gem_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	},
	/* Called by virtio_dma_buf_attach before the generic attach. */
	.device_attach = drm_gem_map_attach,
	.get_uuid = virtgpu_virtio_get_uuid,
};
61
62int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
63 struct virtio_gpu_object *bo)
64{
65 int ret;
66 struct virtio_gpu_object_array *objs;
67
68 objs = virtio_gpu_array_alloc(1);
69 if (!objs)
70 return -ENOMEM;
71
72 virtio_gpu_array_add_obj(objs, &bo->base.base);
73 ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
74 if (ret)
75 return ret;
76
77 return 0;
78}
79
80struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
81 int flags)
82{
83 struct dma_buf *buf;
84 struct drm_device *dev = obj->dev;
85 struct virtio_gpu_device *vgdev = dev->dev_private;
86 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
87 int ret = 0;
88 bool blob = bo->host3d_blob || bo->guest_blob;
89 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
90
91 if (!blob) {
92 if (vgdev->has_resource_assign_uuid) {
93 ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
94 if (ret)
95 return ERR_PTR(ret);
96
97 virtio_gpu_notify(vgdev);
98 } else {
99 bo->uuid_state = STATE_ERR;
100 }
101 } else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
102 bo->uuid_state = STATE_ERR;
103 }
104
105 exp_info.ops = &virtgpu_dmabuf_ops.ops;
106 exp_info.size = obj->size;
107 exp_info.flags = flags;
108 exp_info.priv = obj;
109 exp_info.resv = obj->resv;
110
111 buf = virtio_dma_buf_export(&exp_info);
112 if (IS_ERR(buf))
113 return buf;
114
115 drm_dev_get(dev);
116 drm_gem_object_get(obj);
117
118 return buf;
119}
120
121struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
122 struct dma_buf *buf)
123{
124 struct drm_gem_object *obj;
125
126 if (buf->ops == &virtgpu_dmabuf_ops.ops) {
127 obj = buf->priv;
128 if (obj->dev == dev) {
129
130
131
132
133 drm_gem_object_get(obj);
134 return obj;
135 }
136 }
137
138 return drm_gem_prime_import(dev, buf);
139}
140
/*
 * Importing foreign buffers via an sg_table is not supported by this
 * driver; always fail with -ENODEV so PRIME import of non-virtgpu
 * dma-bufs is rejected.
 */
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	return ERR_PTR(-ENODEV);
}
147