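/*
 * virtio GPU driver: command virtqueue handling.
 *
 * Commands are queued on the control and cursor virtqueues as
 * virtio_gpu_vbuffer objects; completions are reclaimed by the work
 * handlers scheduled from the virtqueue callbacks below.
 */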
#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

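/*
 * Commands and responses up to these sizes live inline in the vbuffer
 * allocation itself; larger response buffers must be supplied by the
 * caller (see virtio_gpu_get_vbuf()).
 */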
#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE \
			       + MAX_INLINE_RESP_SIZE)

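/*
 * Virtqueue callbacks run in interrupt context, so the actual dequeue
 * work is punted to a workqueue.
 */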
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

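/*
 * vbuf allocation helpers.  The command and (small) response buffers are
 * carved out of the single slab allocation made in virtio_gpu_get_vbuf().
 */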
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed\n");
}

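/*
 * Work handler for the control queue: pull completed buffers off the
 * virtqueue, log error responses, track the highest fence id seen, run
 * per-buffer response callbacks, then signal fences and waiters.
 */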
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
				DRM_ERROR("response 0x%x (command 0x%x)\n",
					  le32_to_cpu(resp->type),
					  le32_to_cpu(cmd->type));
			} else {
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
			}
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

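/*
 * May temporarily drop vgdev->ctrlq.qlock while waiting for ring space,
 * hence the __releases/__acquires annotations below.
 */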
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   (struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   (struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/*
 * Command helpers.  Each of these builds a host command in little-endian
 * wire format and queues it on the control virtqueue.
 */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

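/*
 * When the transport uses the DMA API, sync the backing pages to the
 * device before asking the host to read them.
 */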
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

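/*
 * Response callbacks, run from the control-queue work handler once the
 * host has filled in the response buffer.
 */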
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

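/*
 * Look up (or create) the cached capset entry for (idx, version).  A
 * candidate entry is allocated up front so the list walk and insert can
 * happen under a single lock; if another caller raced us, the candidate
 * is freed again below.
 */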
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

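/*
 * Attach backing pages to a host resource.  The scatterlist is mapped
 * for DMA when the transport uses the DMA API; otherwise physical
 * addresses are passed through directly.
 */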
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}

void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);

		/* detach backing and wait for the host to stop using it */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* the host is done with the pages; safe to unmap now */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}
}

1032
1033void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
1034 struct virtio_gpu_output *output)
1035{
1036 struct virtio_gpu_vbuffer *vbuf;
1037 struct virtio_gpu_update_cursor *cur_p;
1038
1039 output->cursor.pos.scanout_id = cpu_to_le32(output->index);
1040 cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
1041 memcpy(cur_p, &output->cursor, sizeof(output->cursor));
1042 virtio_gpu_queue_cursor(vgdev, vbuf);
1043}
1044