#include <drm/drmP.h>
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <linux/sync_file.h>

#include "virtgpu_drv.h"

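/* Convert a userspace 3D box to the little-endian layout the host expects. */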
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

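/* Hand userspace the (fake) mmap offset for the buffer named by @handle. */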
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

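/*
 * Reserve every buffer on the validation list and validate it against its
 * placement.  On failure the whole reservation is backed off, so the caller
 * never sees a partially reserved list.
 */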
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

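/* Drop the GEM references taken when the objects were looked up. */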
static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_put_unlocked(&qobj->gem_base);
	}
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPU_CMD field (be it legacy or chain).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *out_fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return -EINVAL;

	exbuf->fence_fd = -1;

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the
		 * fence array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		buflist = kvmalloc_array(exbuf->num_bo_handles,
					 sizeof(struct ttm_validate_buffer),
					 GFP_KERNEL | __GFP_ZERO);
		if (!bo_handles || !buflist) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
			if (!gobj) {
				ret = -ENOENT;
				/* unwind the gem references taken so far */
				goto out_free;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
			  exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_memdup;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_memdup;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, out_fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);

	/* the buffers are now fenced; drop the lookup references */
	virtio_gpu_unref_list(&validate_list);
	kvfree(buflist);
	return 0;
232
233out_memdup:
234 kfree(buf);
235out_unresv:
236 ttm_eu_backoff_reservation(&ticket, &validate_list);
237out_free:
238 virtio_gpu_unref_list(&validate_list);
239out_unused_fd:
240 kvfree(bo_handles);
241 kvfree(buflist);
242
243 if (out_fence_fd >= 0)
244 put_unused_fd(out_fence_fd);
245
246 return ret;
247}
248
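/* Report a single integer driver parameter (e.g. 3D support) to userspace. */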
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

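/*
 * Create a host resource and a matching GEM object.  Without virgl only
 * plain 2D resources are allowed; with virgl the full 3D parameters are
 * forwarded to the host and the allocation is fenced.
 */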
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (!vgdev->has_virgl_3d) {
		/* only simple 2D resources (target 2 == PIPE_TEXTURE_2D) */
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj))
		return PTR_ERR(qobj);
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_get(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(qobj->hw_res_handle);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence) {
			ret = -ENOMEM;
			goto fail_backoff;
		}

		virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
		ret = virtio_gpu_object_attach(vgdev, qobj, fence);
		if (ret) {
			virtio_gpu_fence_cleanup(fence);
			goto fail_backoff;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			dma_fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = qobj->hw_res_handle;
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
	return 0;
fail_backoff:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
	return ret;
}

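/* Look up a buffer object and report its size and host resource id. */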
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

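/*
 * Queue a transfer of a box of the host resource into the guest buffer
 * object and fence it.  Only valid when the device has virgl 3D support.
 */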
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct ttm_operation_ctx ctx = { true, false };
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unres;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	dma_fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}

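/*
 * Queue a transfer from the guest buffer object to the host resource.
 * 2D transfers are unfenced; 3D transfers take the context id and a fence.
 */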
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct ttm_operation_ctx ctx = { true, false };
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence) {
			ret = -ENOMEM;
			goto out_unres;
		}
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		dma_fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}

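/* Wait for (or, with VIRTGPU_WAIT_NOWAIT, poll) a buffer object to go idle. */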
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_put_unlocked(gobj);
	return ret;
}

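/*
 * Copy a capability set to userspace, fetching it from the host first if it
 * is not in the local cache yet.
 */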
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned int size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy as much as the guest asked for */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
};