#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"
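
/*
 * When the virglrenderer workaround is enabled (the default),
 * resource IDs are handed out from a monotonic sequence and never
 * reused; see virtio_gpu_resource_id_get() below.  The "virglhack"
 * module parameter (perm 0400, so effectively load-time only)
 * disables the workaround so IDs are recycled through an IDA.
 */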
static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
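
/*
 * Resource IDs passed to the host start at 1: both allocation paths
 * below return handle + 1, so ID 0, which virtio-gpu uses to mean
 * "no resource" (e.g. when disabling a scanout), is never handed out.
 */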
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				      uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs: old host-side
		 * virglrenderer versions cannot cope with a resource ID
		 * being recycled for a new object, so hand out IDs from
		 * a monotonically increasing sequence number instead of
		 * recycling them through the IDA.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround)
		ida_free(&vgdev->resource_ida, id - 1);
}
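
/*
 * Release everything a virtio_gpu_object still holds: the hardware
 * resource ID and, for shmem-backed objects, the DMA mapping, the
 * scatter/gather table, the page pin, and finally the GEM shmem
 * object itself.
 */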
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sg(vgdev->vdev->dev.parent,
					     shmem->pages->sgl, shmem->mapped,
					     DMA_TO_DEVICE);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base.base);
		}

		drm_gem_shmem_free_object(&bo->base.base);
	}
}

static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		/* the host still knows the resource: ask it to unref,
		 * the completion handler then calls
		 * virtio_gpu_cleanup_object() */
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
	virtio_gpu_cleanup_object(bo);
}
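
/*
 * GEM object callbacks for shmem-backed objects: free, open and
 * close are driver-specific, everything else is delegated to the
 * generic shmem helpers.  virtio_gpu_is_shmem() identifies shmem
 * objects by comparing against this table.
 */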
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,

	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}
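
/*
 * Allocate the driver-private wrapper around a GEM shmem object and
 * hook up the funcs table above so the shmem helpers call back into
 * this file; map_cached requests cached mappings of the backing
 * pages.
 */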
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	dshmem->map_cached = true;
	return &dshmem->base;
}
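
/*
 * Pin the object's backing pages, build a scatter/gather table for
 * them and translate that into the virtio_gpu_mem_entry array handed
 * to the host.  With the DMA API in use the entries carry DMA
 * addresses, otherwise guest-physical addresses; all fields are
 * stored little-endian on the wire.
 */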
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return -EINVAL;

	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!shmem->pages) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					   shmem->pages->sgl,
					   shmem->pages->nents,
					   DMA_TO_DEVICE);
		*nents = shmem->mapped;
	} else {
		*nents = shmem->pages->nents;
	}

	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
			      GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(shmem->pages->sgl, sg, *nents, si) {
		(*ents)[si].addr = cpu_to_le64(use_dma_api
					       ? sg_dma_address(sg)
					       : sg_phys(sg));
		(*ents)[si].length = cpu_to_le32(sg->length);
		(*ents)[si].padding = 0;
	}
	return 0;
}
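
/*
 * Create a virtio-gpu object: reserve a resource ID, submit the
 * resource creation command to the host (2D or 3D, optionally fenced
 * against @fence), then attach the guest backing pages to the
 * resource.  On success the new object is returned via @bo_ptr.
 */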
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		/* the create command above already marked the object
		 * as created, so this also unrefs it on the host */
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	virtio_gpu_object_attach(vgdev, bo, ents, nents);

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}