qemu/hw/display/virtio-gpu.c
   1/*
   2 * Virtio GPU Device
   3 *
   4 * Copyright Red Hat, Inc. 2013-2014
   5 *
   6 * Authors:
   7 *     Dave Airlie <airlied@redhat.com>
   8 *     Gerd Hoffmann <kraxel@redhat.com>
   9 *
  10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  11 * See the COPYING file in the top-level directory.
  12 */
  13
  14#include "qemu/osdep.h"
  15#include "qemu/units.h"
  16#include "qemu/iov.h"
  17#include "ui/console.h"
  18#include "trace.h"
  19#include "sysemu/dma.h"
  20#include "sysemu/sysemu.h"
  21#include "hw/virtio/virtio.h"
  22#include "migration/qemu-file-types.h"
  23#include "hw/virtio/virtio-gpu.h"
  24#include "hw/virtio/virtio-gpu-bswap.h"
  25#include "hw/virtio/virtio-gpu-pixman.h"
  26#include "hw/virtio/virtio-bus.h"
  27#include "hw/display/edid.h"
  28#include "hw/qdev-properties.h"
  29#include "qemu/log.h"
  30#include "qemu/module.h"
  31#include "qapi/error.h"
  32#include "qemu/error-report.h"
  33
  34#define VIRTIO_GPU_VM_VERSION 1
  35
  36static struct virtio_gpu_simple_resource*
  37virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
  38
  39static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
  40                                       struct virtio_gpu_simple_resource *res);
  41
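     /*
      * VIRGL(g, virgl_fn, simple_fn, args...) dispatches at runtime: it calls
      * virgl_fn(args) when the device is using the virgl renderer and
      * simple_fn(args) otherwise.  Without CONFIG_VIRGL it always expands to
      * the simple 2D variant.
      */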
  42#ifdef CONFIG_VIRGL
  43#include <virglrenderer.h>
  44#define VIRGL(_g, _virgl, _simple, ...)                     \
  45    do {                                                    \
  46        if (_g->parent_obj.use_virgl_renderer) {            \
  47            _virgl(__VA_ARGS__);                            \
  48        } else {                                            \
  49            _simple(__VA_ARGS__);                           \
  50        }                                                   \
  51    } while (0)
  52#else
  53#define VIRGL(_g, _virgl, _simple, ...)                 \
  54    do {                                                \
  55        _simple(__VA_ARGS__);                           \
  56    } while (0)
  57#endif
  58
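     /*
      * Copy the cursor pixel data out of the guest resource backing the
      * cursor.  Bails out silently if the resource is missing or its
      * dimensions do not match the current cursor.
      */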
  59static void update_cursor_data_simple(VirtIOGPU *g,
  60                                      struct virtio_gpu_scanout *s,
  61                                      uint32_t resource_id)
  62{
  63    struct virtio_gpu_simple_resource *res;
  64    uint32_t pixels;
  65
  66    res = virtio_gpu_find_resource(g, resource_id);
  67    if (!res) {
  68        return;
  69    }
  70
  71    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
  72        pixman_image_get_height(res->image) != s->current_cursor->height) {
  73        return;
  74    }
  75
  76    pixels = s->current_cursor->width * s->current_cursor->height;
  77    memcpy(s->current_cursor->data,
  78           pixman_image_get_data(res->image),
  79           pixels * sizeof(uint32_t));
  80}
  81
  82#ifdef CONFIG_VIRGL
  83
  84static void update_cursor_data_virgl(VirtIOGPU *g,
  85                                     struct virtio_gpu_scanout *s,
  86                                     uint32_t resource_id)
  87{
  88    uint32_t width, height;
  89    uint32_t pixels, *data;
  90
  91    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
  92    if (!data) {
  93        return;
  94    }
  95
  96    if (width != s->current_cursor->width ||
  97        height != s->current_cursor->height) {
  98        free(data);
  99        return;
 100    }
 101
 102    pixels = s->current_cursor->width * s->current_cursor->height;
 103    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
 104    free(data);
 105}
 106
 107#endif
 108
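     /*
      * Handle a request from the cursor queue: either move the pointer or
      * redefine the cursor image (including its hotspot) from the given
      * resource.
      */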
 109static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
 110{
 111    struct virtio_gpu_scanout *s;
 112    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;
 113
 114    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
 115        return;
 116    }
 117    s = &g->parent_obj.scanout[cursor->pos.scanout_id];
 118
 119    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
 120                                   cursor->pos.x,
 121                                   cursor->pos.y,
 122                                   move ? "move" : "update",
 123                                   cursor->resource_id);
 124
 125    if (!move) {
 126        if (!s->current_cursor) {
 127            s->current_cursor = cursor_alloc(64, 64);
 128        }
 129
 130        s->current_cursor->hot_x = cursor->hot_x;
 131        s->current_cursor->hot_y = cursor->hot_y;
 132
 133        if (cursor->resource_id > 0) {
 134            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
 135                  g, s, cursor->resource_id);
 136        }
 137        dpy_cursor_define(s->con, s->current_cursor);
 138
 139        s->cursor = *cursor;
 140    } else {
 141        s->cursor.pos.x = cursor->pos.x;
 142        s->cursor.pos.y = cursor->pos.y;
 143    }
 144    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
 145                  cursor->resource_id ? 1 : 0);
 146}
 147
 148static struct virtio_gpu_simple_resource *
 149virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
 150{
 151    struct virtio_gpu_simple_resource *res;
 152
 153    QTAILQ_FOREACH(res, &g->reslist, next) {
 154        if (res->resource_id == resource_id) {
 155            return res;
 156        }
 157    }
 158    return NULL;
 159}
 160
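     /*
      * Send a response for @cmd back to the guest: copy the fence id and
      * context id from the request when VIRTIO_GPU_FLAG_FENCE is set,
      * byteswap the header, copy the response into the request's in-buffers
      * and push it onto the control virtqueue.
      */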
 161void virtio_gpu_ctrl_response(VirtIOGPU *g,
 162                              struct virtio_gpu_ctrl_command *cmd,
 163                              struct virtio_gpu_ctrl_hdr *resp,
 164                              size_t resp_len)
 165{
 166    size_t s;
 167
 168    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
 169        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
 170        resp->fence_id = cmd->cmd_hdr.fence_id;
 171        resp->ctx_id = cmd->cmd_hdr.ctx_id;
 172    }
 173    virtio_gpu_ctrl_hdr_bswap(resp);
 174    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
 175    if (s != resp_len) {
 176        qemu_log_mask(LOG_GUEST_ERROR,
 177                      "%s: response size incorrect %zu vs %zu\n",
 178                      __func__, s, resp_len);
 179    }
 180    virtqueue_push(cmd->vq, &cmd->elem, s);
 181    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
 182    cmd->finished = true;
 183}
 184
 185void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
 186                                     struct virtio_gpu_ctrl_command *cmd,
 187                                     enum virtio_gpu_ctrl_type type)
 188{
 189    struct virtio_gpu_ctrl_hdr resp;
 190
 191    memset(&resp, 0, sizeof(resp));
 192    resp.type = type;
 193    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
 194}
 195
 196void virtio_gpu_get_display_info(VirtIOGPU *g,
 197                                 struct virtio_gpu_ctrl_command *cmd)
 198{
 199    struct virtio_gpu_resp_display_info display_info;
 200
 201    trace_virtio_gpu_cmd_get_display_info();
 202    memset(&display_info, 0, sizeof(display_info));
 203    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
 204    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
 205    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
 206                             sizeof(display_info));
 207}
 208
 209static void
 210virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
 211                         struct virtio_gpu_resp_edid *edid)
 212{
 213    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
 214    qemu_edid_info info = {
 215        .prefx = b->req_state[scanout].width,
 216        .prefy = b->req_state[scanout].height,
 217    };
 218
 219    edid->size = cpu_to_le32(sizeof(edid->edid));
 220    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
 221}
 222
 223void virtio_gpu_get_edid(VirtIOGPU *g,
 224                         struct virtio_gpu_ctrl_command *cmd)
 225{
 226    struct virtio_gpu_resp_edid edid;
 227    struct virtio_gpu_cmd_get_edid get_edid;
 228    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
 229
 230    VIRTIO_GPU_FILL_CMD(get_edid);
 231    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));
 232
 233    if (get_edid.scanout >= b->conf.max_outputs) {
 234        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
 235        return;
 236    }
 237
 238    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
 239    memset(&edid, 0, sizeof(edid));
 240    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
 241    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
 242    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
 243}
 244
 245static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
 246                                   uint32_t width, uint32_t height)
 247{
  248    /* Copied from pixman/pixman-bits-image.c, skipping the integer overflow
  249     * check; pixman_image_create_bits() will fail if the size overflows.
  250     */
 251
 252    int bpp = PIXMAN_FORMAT_BPP(pformat);
 253    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
 254    return height * stride;
 255}
 256
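     /*
      * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host pixman image for
      * the new guest resource.  The allocation is skipped, and the command
      * fails with OUT_OF_MEMORY, when it would exceed the max_hostmem limit.
      */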
 257static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
 258                                          struct virtio_gpu_ctrl_command *cmd)
 259{
 260    pixman_format_code_t pformat;
 261    struct virtio_gpu_simple_resource *res;
 262    struct virtio_gpu_resource_create_2d c2d;
 263
 264    VIRTIO_GPU_FILL_CMD(c2d);
 265    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
 266    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
 267                                       c2d.width, c2d.height);
 268
 269    if (c2d.resource_id == 0) {
 270        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
 271                      __func__);
 272        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
 273        return;
 274    }
 275
 276    res = virtio_gpu_find_resource(g, c2d.resource_id);
 277    if (res) {
 278        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
 279                      __func__, c2d.resource_id);
 280        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
 281        return;
 282    }
 283
 284    res = g_new0(struct virtio_gpu_simple_resource, 1);
 285
 286    res->width = c2d.width;
 287    res->height = c2d.height;
 288    res->format = c2d.format;
 289    res->resource_id = c2d.resource_id;
 290
 291    pformat = virtio_gpu_get_pixman_format(c2d.format);
 292    if (!pformat) {
 293        qemu_log_mask(LOG_GUEST_ERROR,
 294                      "%s: host couldn't handle guest format %d\n",
 295                      __func__, c2d.format);
 296        g_free(res);
 297        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
 298        return;
 299    }
 300
 301    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
 302    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
 303        res->image = pixman_image_create_bits(pformat,
 304                                              c2d.width,
 305                                              c2d.height,
 306                                              NULL, 0);
 307    }
 308
 309    if (!res->image) {
 310        qemu_log_mask(LOG_GUEST_ERROR,
 311                      "%s: resource creation failed %d %d %d\n",
 312                      __func__, c2d.resource_id, c2d.width, c2d.height);
 313        g_free(res);
 314        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
 315        return;
 316    }
 317
 318    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
 319    g->hostmem += res->hostmem;
 320}
 321
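     /*
      * Detach the current resource from a scanout.  The primary head
      * (scanout 0) gets a placeholder "Guest disabled display." surface;
      * the other heads have their surface replaced with NULL.
      */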
 322static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
 323{
 324    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
 325    struct virtio_gpu_simple_resource *res;
 326    DisplaySurface *ds = NULL;
 327
 328    if (scanout->resource_id == 0) {
 329        return;
 330    }
 331
 332    res = virtio_gpu_find_resource(g, scanout->resource_id);
 333    if (res) {
 334        res->scanout_bitmask &= ~(1 << scanout_id);
 335    }
 336
 337    if (scanout_id == 0) {
 338        /* primary head */
 339        ds = qemu_create_message_surface(scanout->width  ?: 640,
 340                                         scanout->height ?: 480,
 341                                         "Guest disabled display.");
 342    }
 343    dpy_gfx_replace_surface(scanout->con, ds);
 344    scanout->resource_id = 0;
 345    scanout->ds = NULL;
 346    scanout->width = 0;
 347    scanout->height = 0;
 348}
 349
 350static void virtio_gpu_resource_destroy(VirtIOGPU *g,
 351                                        struct virtio_gpu_simple_resource *res)
 352{
 353    int i;
 354
 355    if (res->scanout_bitmask) {
 356        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
 357            if (res->scanout_bitmask & (1 << i)) {
 358                virtio_gpu_disable_scanout(g, i);
 359            }
 360        }
 361    }
 362
 363    pixman_image_unref(res->image);
 364    virtio_gpu_cleanup_mapping(g, res);
 365    QTAILQ_REMOVE(&g->reslist, res, next);
 366    g->hostmem -= res->hostmem;
 367    g_free(res);
 368}
 369
 370static void virtio_gpu_resource_unref(VirtIOGPU *g,
 371                                      struct virtio_gpu_ctrl_command *cmd)
 372{
 373    struct virtio_gpu_simple_resource *res;
 374    struct virtio_gpu_resource_unref unref;
 375
 376    VIRTIO_GPU_FILL_CMD(unref);
 377    virtio_gpu_bswap_32(&unref, sizeof(unref));
 378    trace_virtio_gpu_cmd_res_unref(unref.resource_id);
 379
 380    res = virtio_gpu_find_resource(g, unref.resource_id);
 381    if (!res) {
 382        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
 383                      __func__, unref.resource_id);
 384        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
 385        return;
 386    }
 387    virtio_gpu_resource_destroy(g, res);
 388}
 389
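     /*
      * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy data from the guest-supplied
      * backing iovec into the host pixman image.  A transfer starting at
      * offset 0 that covers full scanlines is done as one bulk copy of the
      * whole image; anything else is copied line by line.
      */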
 390static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
 391                                           struct virtio_gpu_ctrl_command *cmd)
 392{
 393    struct virtio_gpu_simple_resource *res;
 394    int h;
 395    uint32_t src_offset, dst_offset, stride;
 396    int bpp;
 397    pixman_format_code_t format;
 398    struct virtio_gpu_transfer_to_host_2d t2d;
 399
 400    VIRTIO_GPU_FILL_CMD(t2d);
 401    virtio_gpu_t2d_bswap(&t2d);
 402    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);
 403
 404    res = virtio_gpu_find_resource(g, t2d.resource_id);
 405    if (!res || !res->iov) {
 406        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
 407                      __func__, t2d.resource_id);
 408        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
 409        return;
 410    }
 411
 412    if (t2d.r.x > res->width ||
 413        t2d.r.y > res->height ||
 414        t2d.r.width > res->width ||
 415        t2d.r.height > res->height ||
 416        t2d.r.x + t2d.r.width > res->width ||
 417        t2d.r.y + t2d.r.height > res->height) {
 418        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
 419                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
 420                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
 421                      t2d.r.width, t2d.r.height, res->width, res->height);
 422        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
 423        return;
 424    }
 425
 426    format = pixman_image_get_format(res->image);
 427    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
 428    stride = pixman_image_get_stride(res->image);
 429
 430    if (t2d.offset || t2d.r.x || t2d.r.y ||
 431        t2d.r.width != pixman_image_get_width(res->image)) {
 432        void *img_data = pixman_image_get_data(res->image);
 433        for (h = 0; h < t2d.r.height; h++) {
 434            src_offset = t2d.offset + stride * h;
 435            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);
 436
 437            iov_to_buf(res->iov, res->iov_cnt, src_offset,
 438                       (uint8_t *)img_data
 439                       + dst_offset, t2d.r.width * bpp);
 440        }
 441    } else {
 442        iov_to_buf(res->iov, res->iov_cnt, 0,
 443                   pixman_image_get_data(res->image),
 444                   pixman_image_get_stride(res->image)
 445                   * pixman_image_get_height(res->image));
 446    }
 447}
 448
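     /*
      * VIRTIO_GPU_CMD_RESOURCE_FLUSH: for every scanout showing this
      * resource, intersect the flush rectangle with the scanout rectangle
      * and update the corresponding console.
      */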
 449static void virtio_gpu_resource_flush(VirtIOGPU *g,
 450                                      struct virtio_gpu_ctrl_command *cmd)
 451{
 452    struct virtio_gpu_simple_resource *res;
 453    struct virtio_gpu_resource_flush rf;
 454    pixman_region16_t flush_region;
 455    int i;
 456
 457    VIRTIO_GPU_FILL_CMD(rf);
 458    virtio_gpu_bswap_32(&rf, sizeof(rf));
 459    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
 460                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);
 461
 462    res = virtio_gpu_find_resource(g, rf.resource_id);
 463    if (!res) {
 464        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
 465                      __func__, rf.resource_id);
 466        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
 467        return;
 468    }
 469
 470    if (rf.r.x > res->width ||
 471        rf.r.y > res->height ||
 472        rf.r.width > res->width ||
 473        rf.r.height > res->height ||
 474        rf.r.x + rf.r.width > res->width ||
 475        rf.r.y + rf.r.height > res->height) {
 476        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
 477                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
 478                      __func__, rf.resource_id, rf.r.x, rf.r.y,
 479                      rf.r.width, rf.r.height, res->width, res->height);
 480        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
 481        return;
 482    }
 483
 484    pixman_region_init_rect(&flush_region,
 485                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
 486    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
 487        struct virtio_gpu_scanout *scanout;
 488        pixman_region16_t region, finalregion;
 489        pixman_box16_t *extents;
 490
 491        if (!(res->scanout_bitmask & (1 << i))) {
 492            continue;
 493        }
 494        scanout = &g->parent_obj.scanout[i];
 495
 496        pixman_region_init(&finalregion);
 497        pixman_region_init_rect(&region, scanout->x, scanout->y,
 498                                scanout->width, scanout->height);
 499
 500        pixman_region_intersect(&finalregion, &flush_region, &region);
 501        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
 502        extents = pixman_region_extents(&finalregion);
 503        /* work out the area we need to update for each console */
 504        dpy_gfx_update(g->parent_obj.scanout[i].con,
 505                       extents->x1, extents->y1,
 506                       extents->x2 - extents->x1,
 507                       extents->y2 - extents->y1);
 508
 509        pixman_region_fini(&region);
 510        pixman_region_fini(&finalregion);
 511    }
 512    pixman_region_fini(&flush_region);
 513}
 514
 515static void virtio_unref_resource(pixman_image_t *image, void *data)
 516{
 517    pixman_image_unref(data);
 518}
 519
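     /*
      * VIRTIO_GPU_CMD_SET_SCANOUT: bind a resource (or none) to a scanout.
      * A new display surface sharing the resource's pixman bits is installed
      * whenever the backing data pointer or geometry changed.
      */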
 520static void virtio_gpu_set_scanout(VirtIOGPU *g,
 521                                   struct virtio_gpu_ctrl_command *cmd)
 522{
 523    struct virtio_gpu_simple_resource *res, *ores;
 524    struct virtio_gpu_scanout *scanout;
 525    pixman_format_code_t format;
 526    uint32_t offset;
 527    int bpp;
 528    struct virtio_gpu_set_scanout ss;
 529
 530    VIRTIO_GPU_FILL_CMD(ss);
 531    virtio_gpu_bswap_32(&ss, sizeof(ss));
 532    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
 533                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);
 534
 535    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
  536        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
  537                      __func__, ss.scanout_id);
 538        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
 539        return;
 540    }
 541
 542    g->parent_obj.enable = 1;
 543    if (ss.resource_id == 0) {
 544        virtio_gpu_disable_scanout(g, ss.scanout_id);
 545        return;
 546    }
 547
 548    /* create a surface for this scanout */
 549    res = virtio_gpu_find_resource(g, ss.resource_id);
 550    if (!res) {
 551        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
 552                      __func__, ss.resource_id);
 553        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
 554        return;
 555    }
 556
 557    if (ss.r.x > res->width ||
 558        ss.r.y > res->height ||
 559        ss.r.width < 16 ||
 560        ss.r.height < 16 ||
 561        ss.r.width > res->width ||
 562        ss.r.height > res->height ||
 563        ss.r.x + ss.r.width > res->width ||
 564        ss.r.y + ss.r.height > res->height) {
 565        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
 566                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
 567                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
 568                      ss.r.width, ss.r.height, res->width, res->height);
 569        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
 570        return;
 571    }
 572
 573    scanout = &g->parent_obj.scanout[ss.scanout_id];
 574
 575    format = pixman_image_get_format(res->image);
 576    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
 577    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
 578    if (!scanout->ds || surface_data(scanout->ds)
 579        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
 580        scanout->width != ss.r.width ||
 581        scanout->height != ss.r.height) {
 582        pixman_image_t *rect;
 583        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
 584        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
 585                                        pixman_image_get_stride(res->image));
 586        pixman_image_ref(res->image);
 587        pixman_image_set_destroy_function(rect, virtio_unref_resource,
 588                                          res->image);
 589        /* realloc the surface ptr */
 590        scanout->ds = qemu_create_displaysurface_pixman(rect);
 591        if (!scanout->ds) {
 592            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
 593            return;
 594        }
 595        pixman_image_unref(rect);
 596        dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
 597                                scanout->ds);
 598    }
 599
 600    ores = virtio_gpu_find_resource(g, scanout->resource_id);
 601    if (ores) {
 602        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
 603    }
 604
 605    res->scanout_bitmask |= (1 << ss.scanout_id);
 606    scanout->resource_id = ss.resource_id;
 607    scanout->x = ss.r.x;
 608    scanout->y = ss.r.y;
 609    scanout->width = ss.r.width;
 610    scanout->height = ss.r.height;
 611}
 612
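     /*
      * Translate the guest's attach_backing entries into an iovec by DMA
      * mapping each guest memory block.  Returns 0 on success; on failure
      * all mappings created so far are torn down and -1 is returned.
      */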
 613int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
 614                                  struct virtio_gpu_resource_attach_backing *ab,
 615                                  struct virtio_gpu_ctrl_command *cmd,
 616                                  uint64_t **addr, struct iovec **iov)
 617{
 618    struct virtio_gpu_mem_entry *ents;
 619    size_t esize, s;
 620    int i;
 621
 622    if (ab->nr_entries > 16384) {
 623        qemu_log_mask(LOG_GUEST_ERROR,
 624                      "%s: nr_entries is too big (%d > 16384)\n",
 625                      __func__, ab->nr_entries);
 626        return -1;
 627    }
 628
 629    esize = sizeof(*ents) * ab->nr_entries;
 630    ents = g_malloc(esize);
 631    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
 632                   sizeof(*ab), ents, esize);
 633    if (s != esize) {
 634        qemu_log_mask(LOG_GUEST_ERROR,
 635                      "%s: command data size incorrect %zu vs %zu\n",
 636                      __func__, s, esize);
 637        g_free(ents);
 638        return -1;
 639    }
 640
 641    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
 642    if (addr) {
 643        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
 644    }
 645    for (i = 0; i < ab->nr_entries; i++) {
 646        uint64_t a = le64_to_cpu(ents[i].addr);
 647        uint32_t l = le32_to_cpu(ents[i].length);
 648        hwaddr len = l;
 649        (*iov)[i].iov_len = l;
 650        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
 651                                            a, &len, DMA_DIRECTION_TO_DEVICE);
 652        if (addr) {
 653            (*addr)[i] = a;
 654        }
 655        if (!(*iov)[i].iov_base || len != l) {
 656            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
 657                          " resource %d element %d\n",
 658                          __func__, ab->resource_id, i);
 659            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
 660            g_free(ents);
 661            *iov = NULL;
 662            if (addr) {
 663                g_free(*addr);
 664                *addr = NULL;
 665            }
 666            return -1;
 667        }
 668    }
 669    g_free(ents);
 670    return 0;
 671}
 672
 673void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
 674                                    struct iovec *iov, uint32_t count)
 675{
 676    int i;
 677
 678    for (i = 0; i < count; i++) {
 679        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
 680                         iov[i].iov_base, iov[i].iov_len,
 681                         DMA_DIRECTION_TO_DEVICE,
 682                         iov[i].iov_len);
 683    }
 684    g_free(iov);
 685}
 686
 687static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
 688                                       struct virtio_gpu_simple_resource *res)
 689{
 690    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
 691    res->iov = NULL;
 692    res->iov_cnt = 0;
 693    g_free(res->addrs);
 694    res->addrs = NULL;
 695}
 696
 697static void
 698virtio_gpu_resource_attach_backing(VirtIOGPU *g,
 699                                   struct virtio_gpu_ctrl_command *cmd)
 700{
 701    struct virtio_gpu_simple_resource *res;
 702    struct virtio_gpu_resource_attach_backing ab;
 703    int ret;
 704
 705    VIRTIO_GPU_FILL_CMD(ab);
 706    virtio_gpu_bswap_32(&ab, sizeof(ab));
 707    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);
 708
 709    res = virtio_gpu_find_resource(g, ab.resource_id);
 710    if (!res) {
 711        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
 712                      __func__, ab.resource_id);
 713        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
 714        return;
 715    }
 716
 717    if (res->iov) {
 718        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
 719        return;
 720    }
 721
 722    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
 723    if (ret != 0) {
 724        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
 725        return;
 726    }
 727
 728    res->iov_cnt = ab.nr_entries;
 729}
 730
 731static void
 732virtio_gpu_resource_detach_backing(VirtIOGPU *g,
 733                                   struct virtio_gpu_ctrl_command *cmd)
 734{
 735    struct virtio_gpu_simple_resource *res;
 736    struct virtio_gpu_resource_detach_backing detach;
 737
 738    VIRTIO_GPU_FILL_CMD(detach);
 739    virtio_gpu_bswap_32(&detach, sizeof(detach));
 740    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);
 741
 742    res = virtio_gpu_find_resource(g, detach.resource_id);
 743    if (!res || !res->iov) {
 744        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
 745                      __func__, detach.resource_id);
 746        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
 747        return;
 748    }
 749    virtio_gpu_cleanup_mapping(g, res);
 750}
 751
 752static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
 753                                          struct virtio_gpu_ctrl_command *cmd)
 754{
 755    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
 756    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
 757
 758    switch (cmd->cmd_hdr.type) {
 759    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
 760        virtio_gpu_get_display_info(g, cmd);
 761        break;
 762    case VIRTIO_GPU_CMD_GET_EDID:
 763        virtio_gpu_get_edid(g, cmd);
 764        break;
 765    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
 766        virtio_gpu_resource_create_2d(g, cmd);
 767        break;
 768    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
 769        virtio_gpu_resource_unref(g, cmd);
 770        break;
 771    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
 772        virtio_gpu_resource_flush(g, cmd);
 773        break;
 774    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
 775        virtio_gpu_transfer_to_host_2d(g, cmd);
 776        break;
 777    case VIRTIO_GPU_CMD_SET_SCANOUT:
 778        virtio_gpu_set_scanout(g, cmd);
 779        break;
 780    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
 781        virtio_gpu_resource_attach_backing(g, cmd);
 782        break;
 783    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
 784        virtio_gpu_resource_detach_backing(g, cmd);
 785        break;
 786    default:
 787        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
 788        break;
 789    }
 790    if (!cmd->finished) {
 791        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
 792                                        VIRTIO_GPU_RESP_OK_NODATA);
 793    }
 794}
 795
 796static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
 797{
 798    VirtIOGPU *g = VIRTIO_GPU(vdev);
 799    qemu_bh_schedule(g->ctrl_bh);
 800}
 801
 802static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
 803{
 804    VirtIOGPU *g = VIRTIO_GPU(vdev);
 805    qemu_bh_schedule(g->cursor_bh);
 806}
 807
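     /*
      * Drain the queued control commands.  Processing stops while the
      * renderer is blocked; commands that did not complete synchronously
      * (e.g. virgl commands waiting on a fence) are moved to the fence queue
      * and counted as in flight.
      */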
 808void virtio_gpu_process_cmdq(VirtIOGPU *g)
 809{
 810    struct virtio_gpu_ctrl_command *cmd;
 811
 812    while (!QTAILQ_EMPTY(&g->cmdq)) {
 813        cmd = QTAILQ_FIRST(&g->cmdq);
 814
 815        if (g->parent_obj.renderer_blocked) {
 816            break;
 817        }
 818
 819        /* process command */
 820        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
 821              g, cmd);
 822
 823        QTAILQ_REMOVE(&g->cmdq, cmd, next);
 824        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
 825            g->stats.requests++;
 826        }
 827
 828        if (!cmd->finished) {
 829            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
 830            g->inflight++;
 831            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
 832                if (g->stats.max_inflight < g->inflight) {
 833                    g->stats.max_inflight = g->inflight;
 834                }
 835                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
 836            }
 837        } else {
 838            g_free(cmd);
 839        }
 840    }
 841}
 842
 843static void virtio_gpu_gl_unblock(VirtIOGPUBase *b)
 844{
 845    VirtIOGPU *g = VIRTIO_GPU(b);
 846
 847#ifdef CONFIG_VIRGL
 848    if (g->renderer_reset) {
 849        g->renderer_reset = false;
 850        virtio_gpu_virgl_reset(g);
 851    }
 852#endif
 853    virtio_gpu_process_cmdq(g);
 854}
 855
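     /*
      * Control queue processing, scheduled via the ctrl_bh bottom half:
      * initialise the virgl renderer on first use, append all pending
      * requests to the command queue, process them, and finally poll virgl
      * fences when that renderer is active.
      */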
 856static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
 857{
 858    VirtIOGPU *g = VIRTIO_GPU(vdev);
 859    struct virtio_gpu_ctrl_command *cmd;
 860
 861    if (!virtio_queue_ready(vq)) {
 862        return;
 863    }
 864
 865#ifdef CONFIG_VIRGL
 866    if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) {
 867        virtio_gpu_virgl_init(g);
 868        g->renderer_inited = true;
 869    }
 870#endif
 871
 872    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
 873    while (cmd) {
 874        cmd->vq = vq;
 875        cmd->error = 0;
 876        cmd->finished = false;
 877        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
 878        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
 879    }
 880
 881    virtio_gpu_process_cmdq(g);
 882
 883#ifdef CONFIG_VIRGL
 884    if (g->parent_obj.use_virgl_renderer) {
 885        virtio_gpu_virgl_fence_poll(g);
 886    }
 887#endif
 888}
 889
 890static void virtio_gpu_ctrl_bh(void *opaque)
 891{
 892    VirtIOGPU *g = opaque;
 893    virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
 894}
 895
 896static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
 897{
 898    VirtIOGPU *g = VIRTIO_GPU(vdev);
 899    VirtQueueElement *elem;
 900    size_t s;
 901    struct virtio_gpu_update_cursor cursor_info;
 902
 903    if (!virtio_queue_ready(vq)) {
 904        return;
 905    }
 906    for (;;) {
 907        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
 908        if (!elem) {
 909            break;
 910        }
 911
 912        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
 913                       &cursor_info, sizeof(cursor_info));
 914        if (s != sizeof(cursor_info)) {
 915            qemu_log_mask(LOG_GUEST_ERROR,
 916                          "%s: cursor size incorrect %zu vs %zu\n",
 917                          __func__, s, sizeof(cursor_info));
 918        } else {
 919            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
 920            update_cursor(g, &cursor_info);
 921        }
 922        virtqueue_push(vq, elem, 0);
 923        virtio_notify(vdev, vq);
 924        g_free(elem);
 925    }
 926}
 927
 928static void virtio_gpu_cursor_bh(void *opaque)
 929{
 930    VirtIOGPU *g = opaque;
 931    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
 932}
 933
 934static const VMStateDescription vmstate_virtio_gpu_scanout = {
 935    .name = "virtio-gpu-one-scanout",
 936    .version_id = 1,
 937    .fields = (VMStateField[]) {
 938        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
 939        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
 940        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
 941        VMSTATE_INT32(x, struct virtio_gpu_scanout),
 942        VMSTATE_INT32(y, struct virtio_gpu_scanout),
 943        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
 944        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
 945        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
 946        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
 947        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
 948        VMSTATE_END_OF_LIST()
 949    },
 950};
 951
 952static const VMStateDescription vmstate_virtio_gpu_scanouts = {
 953    .name = "virtio-gpu-scanouts",
 954    .version_id = 1,
 955    .fields = (VMStateField[]) {
 956        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
 957        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
 958                             struct VirtIOGPU, NULL),
 959        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
 960                                     parent_obj.conf.max_outputs, 1,
 961                                     vmstate_virtio_gpu_scanout,
 962                                     struct virtio_gpu_scanout),
 963        VMSTATE_END_OF_LIST()
 964    },
 965};
 966
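     /*
      * Ad-hoc migration stream written below: for each resource a be32
      * resource_id (0 terminates the list), width, height, format and
      * iov_cnt, then iov_cnt pairs of { be64 guest address, be32 length },
      * followed by the raw image contents (stride * height bytes).  The
      * scanout state is appended via vmstate_virtio_gpu_scanouts.
      */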
 967static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
 968                           const VMStateField *field, QJSON *vmdesc)
 969{
 970    VirtIOGPU *g = opaque;
 971    struct virtio_gpu_simple_resource *res;
 972    int i;
 973
 974    /* in 2d mode we should never find unprocessed commands here */
 975    assert(QTAILQ_EMPTY(&g->cmdq));
 976
 977    QTAILQ_FOREACH(res, &g->reslist, next) {
 978        qemu_put_be32(f, res->resource_id);
 979        qemu_put_be32(f, res->width);
 980        qemu_put_be32(f, res->height);
 981        qemu_put_be32(f, res->format);
 982        qemu_put_be32(f, res->iov_cnt);
 983        for (i = 0; i < res->iov_cnt; i++) {
 984            qemu_put_be64(f, res->addrs[i]);
 985            qemu_put_be32(f, res->iov[i].iov_len);
 986        }
 987        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
 988                        pixman_image_get_stride(res->image) * res->height);
 989    }
 990    qemu_put_be32(f, 0); /* end of list */
 991
 992    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
 993}
 994
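     /*
      * Counterpart of virtio_gpu_save(): recreate each resource, re-map its
      * guest backing storage, then restore and apply the scanout state.
      */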
 995static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
 996                           const VMStateField *field)
 997{
 998    VirtIOGPU *g = opaque;
 999    struct virtio_gpu_simple_resource *res;
1000    struct virtio_gpu_scanout *scanout;
1001    uint32_t resource_id, pformat;
1002    int i;
1003
1004    g->hostmem = 0;
1005
1006    resource_id = qemu_get_be32(f);
1007    while (resource_id != 0) {
1008        res = virtio_gpu_find_resource(g, resource_id);
1009        if (res) {
1010            return -EINVAL;
1011        }
1012
1013        res = g_new0(struct virtio_gpu_simple_resource, 1);
1014        res->resource_id = resource_id;
1015        res->width = qemu_get_be32(f);
1016        res->height = qemu_get_be32(f);
1017        res->format = qemu_get_be32(f);
1018        res->iov_cnt = qemu_get_be32(f);
1019
1020        /* allocate */
1021        pformat = virtio_gpu_get_pixman_format(res->format);
1022        if (!pformat) {
1023            g_free(res);
1024            return -EINVAL;
1025        }
1026        res->image = pixman_image_create_bits(pformat,
1027                                              res->width, res->height,
1028                                              NULL, 0);
1029        if (!res->image) {
1030            g_free(res);
1031            return -EINVAL;
1032        }
1033
1034        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
1035
1036        res->addrs = g_new(uint64_t, res->iov_cnt);
1037        res->iov = g_new(struct iovec, res->iov_cnt);
1038
1039        /* read data */
1040        for (i = 0; i < res->iov_cnt; i++) {
1041            res->addrs[i] = qemu_get_be64(f);
1042            res->iov[i].iov_len = qemu_get_be32(f);
1043        }
1044        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
1045                        pixman_image_get_stride(res->image) * res->height);
1046
1047        /* restore mapping */
1048        for (i = 0; i < res->iov_cnt; i++) {
1049            hwaddr len = res->iov[i].iov_len;
1050            res->iov[i].iov_base =
1051                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
1052                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);
1053
1054            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
1055                /* Clean up the half-a-mapping we just created... */
1056                if (res->iov[i].iov_base) {
1057                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
1058                                     res->iov[i].iov_base,
1059                                     len,
1060                                     DMA_DIRECTION_TO_DEVICE,
1061                                     0);
1062                }
1063                /* ...and the mappings for previous loop iterations */
1064                res->iov_cnt = i;
1065                virtio_gpu_cleanup_mapping(g, res);
1066                pixman_image_unref(res->image);
1067                g_free(res);
1068                return -EINVAL;
1069            }
1070        }
1071
1072        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
1073        g->hostmem += res->hostmem;
1074
1075        resource_id = qemu_get_be32(f);
1076    }
1077
1078    /* load & apply scanout state */
1079    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
1080    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
1081        scanout = &g->parent_obj.scanout[i];
1082        if (!scanout->resource_id) {
1083            continue;
1084        }
1085        res = virtio_gpu_find_resource(g, scanout->resource_id);
1086        if (!res) {
1087            return -EINVAL;
1088        }
1089        scanout->ds = qemu_create_displaysurface_pixman(res->image);
1090        if (!scanout->ds) {
1091            return -EINVAL;
1092        }
1093
1094        dpy_gfx_replace_surface(scanout->con, scanout->ds);
1095        dpy_gfx_update_full(scanout->con);
1096        if (scanout->cursor.resource_id) {
1097            update_cursor(g, &scanout->cursor);
1098        }
1099        res->scanout_bitmask |= (1 << i);
1100    }
1101
1102    return 0;
1103}
1104
1105static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
1106{
1107    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
1108    VirtIOGPU *g = VIRTIO_GPU(qdev);
1109    bool have_virgl;
1110
1111#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
1112    have_virgl = false;
1113#else
1114    have_virgl = display_opengl;
1115#endif
1116    if (!have_virgl) {
1117        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
1118    } else {
1119#if defined(CONFIG_VIRGL)
1120        VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
1121            virtio_gpu_virgl_get_num_capsets(g);
1122#endif
1123    }
1124
1125    if (!virtio_gpu_base_device_realize(qdev,
1126                                        virtio_gpu_handle_ctrl_cb,
1127                                        virtio_gpu_handle_cursor_cb,
1128                                        errp)) {
1129        return;
1130    }
1131
1132    g->ctrl_vq = virtio_get_queue(vdev, 0);
1133    g->cursor_vq = virtio_get_queue(vdev, 1);
1134    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
1135    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
1136    QTAILQ_INIT(&g->reslist);
1137    QTAILQ_INIT(&g->cmdq);
1138    QTAILQ_INIT(&g->fenceq);
1139}
1140
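     /*
      * Device reset: destroy all resources, drop pending and in-flight
      * commands, and reset the virgl renderer (deferred if it is currently
      * blocked).
      */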
1141static void virtio_gpu_reset(VirtIODevice *vdev)
1142{
1143    VirtIOGPU *g = VIRTIO_GPU(vdev);
1144    struct virtio_gpu_simple_resource *res, *tmp;
1145    struct virtio_gpu_ctrl_command *cmd;
1146
1147#ifdef CONFIG_VIRGL
1148    if (g->parent_obj.use_virgl_renderer) {
1149        virtio_gpu_virgl_reset(g);
1150    }
1151#endif
1152
1153    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
1154        virtio_gpu_resource_destroy(g, res);
1155    }
1156
1157    while (!QTAILQ_EMPTY(&g->cmdq)) {
1158        cmd = QTAILQ_FIRST(&g->cmdq);
1159        QTAILQ_REMOVE(&g->cmdq, cmd, next);
1160        g_free(cmd);
1161    }
1162
1163    while (!QTAILQ_EMPTY(&g->fenceq)) {
1164        cmd = QTAILQ_FIRST(&g->fenceq);
1165        QTAILQ_REMOVE(&g->fenceq, cmd, next);
1166        g->inflight--;
1167        g_free(cmd);
1168    }
1169
1170#ifdef CONFIG_VIRGL
1171    if (g->parent_obj.use_virgl_renderer) {
1172        if (g->parent_obj.renderer_blocked) {
1173            g->renderer_reset = true;
1174        } else {
1175            virtio_gpu_virgl_reset(g);
1176        }
1177        g->parent_obj.use_virgl_renderer = false;
1178    }
1179#endif
1180
1181    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
1182}
1183
1184static void
1185virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
1186{
1187    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
1188
1189    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
1190}
1191
1192static void
1193virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
1194{
1195    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
1196    const struct virtio_gpu_config *vgconfig =
1197        (const struct virtio_gpu_config *)config;
1198
1199    if (vgconfig->events_clear) {
1200        g->virtio_config.events_read &= ~vgconfig->events_clear;
1201    }
1202}
1203
 1204/*
 1205 * For historical reasons virtio_gpu does not adhere to the virtio migration
 1206 * scheme described in doc/virtio-migration.txt, in the sense that no
 1207 * save/load callbacks are provided to the core. Instead the device data
 1208 * is saved/loaded after the core data.
 1209 *
 1210 * Because of this we need a special vmsd.
 1211 */
1212static const VMStateDescription vmstate_virtio_gpu = {
1213    .name = "virtio-gpu",
1214    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
1215    .version_id = VIRTIO_GPU_VM_VERSION,
1216    .fields = (VMStateField[]) {
1217        VMSTATE_VIRTIO_DEVICE /* core */,
1218        {
1219            .name = "virtio-gpu",
1220            .info = &(const VMStateInfo) {
1221                        .name = "virtio-gpu",
1222                        .get = virtio_gpu_load,
1223                        .put = virtio_gpu_save,
1224            },
1225            .flags = VMS_SINGLE,
1226        } /* device */,
1227        VMSTATE_END_OF_LIST()
1228    },
1229};
1230
1231static Property virtio_gpu_properties[] = {
1232    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
1233    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
1234                     256 * MiB),
1235#ifdef CONFIG_VIRGL
1236    DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags,
1237                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
1238    DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
1239                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
1240#endif
1241    DEFINE_PROP_END_OF_LIST(),
1242};
1243
1244static void virtio_gpu_class_init(ObjectClass *klass, void *data)
1245{
1246    DeviceClass *dc = DEVICE_CLASS(klass);
1247    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
1248    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);
1249
1250    vgc->gl_unblock = virtio_gpu_gl_unblock;
1251    vdc->realize = virtio_gpu_device_realize;
1252    vdc->reset = virtio_gpu_reset;
1253    vdc->get_config = virtio_gpu_get_config;
1254    vdc->set_config = virtio_gpu_set_config;
1255
1256    dc->vmsd = &vmstate_virtio_gpu;
1257    dc->props = virtio_gpu_properties;
1258}
1259
1260static const TypeInfo virtio_gpu_info = {
1261    .name = TYPE_VIRTIO_GPU,
1262    .parent = TYPE_VIRTIO_GPU_BASE,
1263    .instance_size = sizeof(VirtIOGPU),
1264    .class_init = virtio_gpu_class_init,
1265};
1266
1267static void virtio_register_types(void)
1268{
1269    type_register_static(&virtio_gpu_info);
1270}
1271
1272type_init(virtio_register_types)
1273