#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

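/*
 * Copy cursor pixel data out of a guest resource into the QEMU cursor.
 * The resource may be a blob (raw bytes) or a regular pixman-backed
 * resource; in both cases its size/dimensions are checked against the
 * current cursor before width * height 32-bit pixels are copied.
 */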
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
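
/*
 * Estimate the host memory a 2D resource will need.  The stride is the
 * row width in bits rounded up to a 32-bit boundary and converted to
 * bytes, which matches how pixman allocates image data.
 */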
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                console_has_gl(scanout->con)) {
                dpy_gl_update(scanout->con, 0, 0, scanout->width,
                              scanout->height);
            }
        }
        return;
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);

        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

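/*
 * Destroy callback installed on the pixman image that wraps a scanout
 * surface: it drops the extra reference taken on the resource's backing
 * image so the pixel data stays valid for as long as the surface does.
 */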
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}

static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
                return;
            }
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

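/*
 * Read nr_entries struct virtio_gpu_mem_entry elements from the command
 * payload (starting at 'offset') and DMA-map each guest range.  The iovec
 * array (and, optionally, the guest address array) is grown in blocks of
 * 16; entries that map only partially are split across several iovecs.
 * On failure everything mapped so far is unwound and -1 is returned.
 */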
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                 a, &len, DMA_DIRECTION_TO_DEVICE);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_realloc(*iov, sizeof(struct iovec) * (v + 16));
                if (addr) {
                    *addr = g_realloc(*addr, sizeof(uint64_t) * (v + 16));
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

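/*
 * Decode a control command header and dispatch to the matching handler.
 * If the handler did not already push a response (cmd->finished) and the
 * renderer is not blocked, a nodata response is sent here, carrying either
 * VIRTIO_GPU_RESP_OK_NODATA or the error the handler recorded.
 */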
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

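/*
 * Drain the queued control commands.  Processing stops early if the
 * renderer is blocked; commands that finish asynchronously (fenced
 * commands) are moved to the fence queue and counted as inflight.
 */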
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

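/*
 * Hand-rolled migration stream for the device state: for every simple
 * resource we write the resource id, width, height, format, the iov
 * count, each (guest address, length) pair, and finally the raw pixel
 * data.  A resource id of 0 terminates the list, after which the scanout
 * state is saved via vmstate_virtio_gpu_scanouts.
 */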
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

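/*
 * Counterpart of virtio_gpu_save(): re-create each resource from the
 * stream, re-map its guest backing memory, then restore the scanout
 * state and bring the display surfaces back up.
 */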
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate the host-side image */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read the backing-store table and the pixel data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore the DMA mappings for the backing storage */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* undo the partial mapping of this entry... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings of all previous entries */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load and apply the scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

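/*
 * The device state is not described field by field; instead the whole
 * thing is streamed by virtio_gpu_save()/virtio_gpu_load() through a
 * single VMS_SINGLE field that follows the virtio core state.
 */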
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)