/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        _simple(__VA_ARGS__);                               \
    } while (0)
#endif

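/* Copy the pixel data of a guest 2D resource into the host cursor image. */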
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

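/* Handle an UPDATE_CURSOR or MOVE_CURSOR request from the cursor queue. */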
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

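/* Copy a response into the request's in-sg, push it and notify the guest. */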
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

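/*
 * virtio-gpu formats name the bytes in memory order, while pixman codes
 * are packed in host endianness, so the mapping differs per host endian.
 */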
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

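/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: back the new resource with a host
 * pixman image (pixman allocates the bits when passed NULL/stride 0). */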
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    /* also unmap the guest backing store, so destroying a resource that
     * still has backing attached does not leak the mapping */
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

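/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy from the guest backing iov
 * into the host pixman image, row by row for partial transfers. */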
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

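/* VIRTIO_GPU_CMD_RESOURCE_FLUSH: update every scanout showing the rect. */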
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);

        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

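/* VIRTIO_GPU_CMD_SET_SCANOUT: bind a resource rectangle to a scanout,
 * or disable the scanout when resource_id is 0. */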
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d\n",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    /* create a surface for this scanout */
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);

        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

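/* Map the guest pages listed after the attach_backing header into an
 * iovec array; returns -1 on failure, unwinding any partial mapping. */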
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (addr) {
            (*addr)[i] = ents[i].addr;
        }
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}


void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

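/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING / _DETACH_BACKING: manage the
 * guest memory that backs a resource. */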
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

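/* Run queued commands; a command left in the waiting state stays at the
 * head of the queue and processing resumes once the renderer unblocks. */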
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

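/* Stream out all 2D resources (metadata, backing addresses and pixel
 * data) followed by the scanout state. */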
static void virtio_gpu_save(QEMUFile *f, void *opaque, size_t size)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

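/* Inverse of virtio_gpu_save: recreate resources, remap their guest
 * backing and reattach the scanouts. */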
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        update_cursor(g, &scanout->cursor);
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq   = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq   = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker);
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not use the standard virtio
 * save/load callbacks: the device data (resources and scanouts) is
 * streamed after the core virtio state instead. That is why the vmsd
 * below wraps virtio_gpu_save/virtio_gpu_load in a custom VMStateInfo.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

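/* Compile-time checks that the wire structs match the virtio-gpu ABI. */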
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);