qemu/hw/display/virtio-gpu.c
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

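/*
 * Dispatch a command implementation: use the virglrenderer variant when
 * the device runs with the 3D renderer enabled, the simple 2D variant
 * otherwise.  Without CONFIG_VIRGL this always expands to the 2D path.
 */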
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->parent_obj.use_virgl_renderer) {            \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

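/*
 * Fill the QEMU cursor from a guest resource's pixman image.  The copy
 * only happens when the resource dimensions exactly match the current
 * cursor; otherwise the cursor data is left untouched.
 */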
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

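/*
 * Handle a cursor command.  VIRTIO_GPU_CMD_MOVE_CURSOR only updates the
 * position; a full update also refreshes the cursor image from the
 * referenced resource and re-defines it on the console.
 */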
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

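/*
 * Write a response into the request's in-iovec, push the element back to
 * the virtqueue and notify the guest.  For fenced requests the fence id
 * and context are copied from the request header into the response.
 */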
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Stride calculation copied from pixman/pixman-bits-image.c, minus the
     * integer overflow check; pixman_image_create_bits() will fail anyway
     * if the size overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

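/*
 * RESOURCE_CREATE_2D: validate the guest-chosen resource id and format,
 * charge the backing size against the max_hostmem budget, and allocate a
 * host-side pixman image for the resource.
 */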
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

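/*
 * Unbind the resource currently shown on a scanout.  Passing a NULL
 * surface to dpy_gfx_replace_surface() lets the console code install a
 * placeholder surface of its own.
 */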
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

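/*
 * TRANSFER_TO_HOST_2D: copy data from the guest backing store into the
 * host pixman image.  A transfer starting at offset 0 and covering full
 * rows is done as one bulk copy; partial rectangles go line by line.
 */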
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

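/*
 * RESOURCE_FLUSH: for every scanout showing this resource, intersect the
 * flushed rectangle with the scanout rectangle, translate the result into
 * scanout coordinates and hand it to the console via dpy_gfx_update().
 */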
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

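/* pixman destroy callback: drop the reference a display surface holds on
 * the backing resource image. */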
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

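/*
 * SET_SCANOUT: point a scanout at a rectangle of a resource.  The display
 * surface is a pixman sub-image that shares the resource's pixel data; an
 * extra reference on the resource image keeps it alive until the surface
 * is destroyed (see virtio_unref_resource above).
 */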
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->parent_obj.enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width < 16 ||
        ss.r.height < 16 ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->parent_obj.scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
                                scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

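/*
 * Map the guest memory entries following an attach_backing request into
 * an iovec using dma_memory_map().  nr_entries is capped at 16384 to
 * bound the allocations; on any mapping failure all mappings made so far
 * are unwound and -1 is returned.
 */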
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                            a, &len, DMA_DIRECTION_TO_DEVICE);
        (*iov)[i].iov_len = len;
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            if ((*iov)[i].iov_base) {
                i++; /* cleanup the 'i'th map */
            }
            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

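/*
 * Drain the command queue.  The processing_cmdq flag guards against
 * reentrancy, and processing stops early while the renderer is blocked.
 * Commands that do not finish synchronously are moved to fenceq and
 * counted as inflight.
 */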
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = VIRTIO_GPU(b);

#ifdef CONFIG_VIRGL
    if (g->renderer_reset) {
        g->renderer_reset = false;
        virtio_gpu_virgl_reset(g);
    }
#endif
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

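/*
 * Device state stream: one record per resource (id, geometry, format,
 * backing iovec layout, raw pixel data), terminated by a zero resource
 * id, followed by the scanout vmstate.  virtio_gpu_load() below mirrors
 * this layout and re-creates images, mappings and display surfaces.
 */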
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

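/*
 * Realize: virgl is only usable when built in, on a little-endian host,
 * and when the display runs with OpenGL enabled; otherwise the virgl flag
 * is cleared.  Then the base device is realized and the control/cursor
 * virtqueues plus their bottom halves are set up.
 */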
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;

#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    } else {
#if defined(CONFIG_VIRGL)
        VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
            virtio_gpu_virgl_get_num_capsets(g);
#endif
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
    }
#endif

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        if (g->parent_obj.renderer_blocked) {
            g->renderer_reset = true;
        } else {
            virtio_gpu_virgl_reset(g);
        }
        g->parent_obj.use_virgl_renderer = false;
    }
#endif

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme described in docs/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                        .name = "virtio-gpu",
                        .get = virtio_gpu_load,
                        .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

1233
1234static Property virtio_gpu_properties[] = {
1235    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
1236    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
1237                     256 * MiB),
1238#ifdef CONFIG_VIRGL
1239    DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags,
1240                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
1241    DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
1242                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
1243#endif
1244    DEFINE_PROP_END_OF_LIST(),
1245};
1246
1247static void virtio_gpu_class_init(ObjectClass *klass, void *data)
1248{
1249    DeviceClass *dc = DEVICE_CLASS(klass);
1250    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
1251    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);
1252
1253    vgc->gl_flushed = virtio_gpu_gl_flushed;
1254    vdc->realize = virtio_gpu_device_realize;
1255    vdc->reset = virtio_gpu_reset;
1256    vdc->get_config = virtio_gpu_get_config;
1257    vdc->set_config = virtio_gpu_set_config;
1258
1259    dc->vmsd = &vmstate_virtio_gpu;
1260    device_class_set_props(dc, virtio_gpu_properties);
1261}
1262
1263static const TypeInfo virtio_gpu_info = {
1264    .name = TYPE_VIRTIO_GPU,
1265    .parent = TYPE_VIRTIO_GPU_BASE,
1266    .instance_size = sizeof(VirtIOGPU),
1267    .class_init = virtio_gpu_class_init,
1268};
1269
1270static void virtio_register_types(void)
1271{
1272    type_register_static(&virtio_gpu_info);
1273}
1274
1275type_init(virtio_register_types)
1276