#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

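/*
 * Guest-supplied structures are little-endian; the helpers below convert
 * them to host byte order.  On little-endian hosts the le*_to_cpus()
 * calls are no-ops.
 */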
static void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

static void virtio_gpu_bswap_32(void *ptr,
                                size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN

    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }

#endif
}

static void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

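/* Copy the cursor pixel data out of a 2D resource, provided the resource
 * size matches the currently allocated cursor. */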
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

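/* Handle UPDATE_CURSOR and MOVE_CURSOR requests: a move only changes the
 * pointer position, an update also refreshes the cursor image from the
 * given resource. */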
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

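/* Complete a control command: copy the fence information from the request
 * when VIRTIO_GPU_FLAG_FENCE is set, write the response into the
 * command's in-sg and push the element onto the used ring. */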
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

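/* Map a virtio-gpu 2D format onto the matching pixman format; returns 0
 * for formats the host can't handle. */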
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

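/* Estimate host memory usage of a resource.  The stride is rounded up to
 * a multiple of 32 bits per row, which should match the allocation done
 * by pixman_image_create_bits(). */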
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

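/* RESOURCE_CREATE_2D: validate the request and allocate a host-side
 * pixman image, accounting it against the max_hostmem limit. */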
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

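/* Detach the current resource from a scanout.  Scanout 0 keeps a
 * placeholder "Guest disabled display." surface instead of going blank. */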
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        ds = qemu_create_message_surface(scanout->width ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }

    if (g->disable_scanout) {
        g->disable_scanout(g, scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

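/* TRANSFER_TO_HOST_2D: copy data from the guest backing store (res->iov)
 * into the host pixman image.  Full-width transfers starting at offset 0
 * are copied in a single pass, everything else goes line by line. */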
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

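/* RESOURCE_FLUSH: intersect the flushed rectangle with every scanout
 * showing this resource and update the corresponding display region. */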
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);

        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

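/* SET_SCANOUT: bind (part of) a resource to a scanout.  A resource_id of
 * 0 disables the scanout; otherwise a display surface sharing the
 * resource's pixel data is (re)created when the viewport changed. */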
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);

        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

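/* Translate the guest's list of memory entries into a host iovec array,
 * mapping each entry with cpu_physical_memory_map().  Returns 0 on
 * success, -1 (with everything unmapped again) on failure. */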
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = cpu_physical_memory_map(a, &len, 1);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

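/* ATTACH_BACKING / DETACH_BACKING: attach guest memory pages as the
 * backing store of a resource, or release that mapping again. */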
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

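/* Dispatch a single control command in 2D (non-virgl) mode and send a
 * generic response unless the handler already produced one. */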
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

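/* Drain the queued control commands.  A command that cannot complete yet
 * sets cmd->waiting and stops processing; commands that are still
 * unfinished are moved to the fence queue and counted as in flight. */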
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
#ifdef CONFIG_VIRGL
    .gl_block = virtio_gpu_gl_block,
#endif
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

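/* Migration: write out all simple resources (metadata, backing entries
 * and pixel data), terminated by a resource_id of 0, followed by the
 * scanout state. */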
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0);

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

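/* Migration: recreate the resources written by virtio_gpu_save(), remap
 * their guest backing store and restore the scanout configuration. */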
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                if (res->iov[i].iov_base) {
                    cpu_physical_memory_unmap(res->iov[i].iov_base,
                                              len, 0, 0);
                }
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

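/* Realize: reject unsupported configurations, create the control and
 * cursor virtqueues plus their bottom halves, and register one QEMU
 * console per configured scanout. */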
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        error_setg(errp, "virtio-gpu does not support vIOMMU yet");
        return;
    }

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

#if defined(CONFIG_VIRGL)
        g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
#else
        g->virtio_config.num_capsets = 0;
#endif
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}
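/* The resource and scanout state is streamed through the
 * virtio_gpu_load()/virtio_gpu_save() callbacks wired up below, after the
 * common virtio device state. */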
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        },
        VMSTATE_END_OF_LIST()
    },
};

1346
1347static Property virtio_gpu_properties[] = {
1348 DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
1349 DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB),
1350#ifdef CONFIG_VIRGL
1351 DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
1352 VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
1353 DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
1354 VIRTIO_GPU_FLAG_STATS_ENABLED, false),
1355#endif
1356 DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
1357 DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
1358 DEFINE_PROP_END_OF_LIST(),
1359};
1360
1361static void virtio_gpu_class_init(ObjectClass *klass, void *data)
1362{
1363 DeviceClass *dc = DEVICE_CLASS(klass);
1364 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
1365
1366 vdc->realize = virtio_gpu_device_realize;
1367 vdc->unrealize = virtio_gpu_device_unrealize;
1368 vdc->get_config = virtio_gpu_get_config;
1369 vdc->set_config = virtio_gpu_set_config;
1370 vdc->get_features = virtio_gpu_get_features;
1371 vdc->set_features = virtio_gpu_set_features;
1372
1373 vdc->reset = virtio_gpu_reset;
1374
1375 set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
1376 dc->props = virtio_gpu_properties;
1377 dc->vmsd = &vmstate_virtio_gpu;
1378 dc->hotpluggable = false;
1379}
1380
1381static const TypeInfo virtio_gpu_info = {
1382 .name = TYPE_VIRTIO_GPU,
1383 .parent = TYPE_VIRTIO_DEVICE,
1384 .instance_size = sizeof(VirtIOGPU),
1385 .instance_init = virtio_gpu_instance_init,
1386 .class_init = virtio_gpu_class_init,
1387};
1388
1389static void virtio_register_types(void)
1390{
1391 type_register_static(&virtio_gpu_info);
1392}
1393
1394type_init(virtio_register_types)
1395
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);