qemu/hw/display/virtio-gpu.c
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

static void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

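/*
 * Byte-swap a whole command structure: the control header first, then
 * every remaining 32-bit word from little endian to host order.  On
 * little-endian hosts this compiles to a no-op.
 */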
static void virtio_gpu_bswap_32(void *ptr,
                                size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN

    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }

#endif
}

static void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
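/*
 * Dispatch a call to the virgl renderer or to the simple 2D code,
 * depending on whether the guest negotiated VIRTIO_GPU_F_VIRGL.
 * Without CONFIG_VIRGL only the simple variant exists.
 */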
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

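/*
 * Handle an UPDATE_CURSOR or MOVE_CURSOR command: (re)define the cursor
 * image for the target scanout on update, or just reposition it on move.
 */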
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    if (virtio_gpu_edid_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_EDID);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

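/*
 * Copy @resp into the request's in-sg and complete the virtqueue element,
 * echoing fence id and context id back when the guest asked for a fence.
 */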
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    qemu_edid_info info = {
        .prefx = g->req_state[scanout].width,
        .prefy = g->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= g->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

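/* Map a virtio-gpu format to the matching pixman format, 0 if unknown. */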
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Stride calculation copied from pixman/pixman-bits-image.c; the
     * integer overflow check is skipped here because
     * pixman_image_create_bits() will fail if the size overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

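/*
 * Create the host-side pixman image backing a 2D resource.  The image is
 * only allocated while the combined host memory of all resources stays
 * under the configured max_hostmem limit.
 */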
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

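/*
 * Turn a scanout off: detach it from its resource and, for the primary
 * head, show a placeholder message surface instead.
 */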
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        /* primary head */
        ds = qemu_create_message_surface(scanout->width  ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }
    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

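/*
 * Copy pixel data from the guest's backing storage into the host pixman
 * image.  A transfer of full-width rows starting at offset 0 is copied
 * in one go; anything else is copied row by row.
 */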
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

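/*
 * Flush a resource rectangle to every scanout it is attached to,
 * intersecting the flush region with each scanout's viewport so only
 * the visible part of each console gets updated.
 */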
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

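/*
 * Attach a resource rectangle to a scanout.  A new display surface is
 * created only if the backing pointer or the geometry changed; resource
 * id 0 disables the scanout instead.
 */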
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

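/*
 * Translate the guest's list of backing memory entries into an iovec of
 * dma_memory_map()ed regions.  Fails, unwinding any partial mappings, if
 * an entry cannot be mapped or the list exceeds 16384 entries.
 */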
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                            a, &len, DMA_DIRECTION_TO_DEVICE);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

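/* Decode and execute a single control command with the simple 2D backend. */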
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

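/*
 * Drain the queued control commands unless the renderer is blocked.
 * Commands that are still unfinished after processing (fenced virgl
 * commands) are parked on fenceq until their fence signals.
 */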
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->renderer_blocked) {
            break;
        }

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

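/*
 * UI callback: record the size and position the user interface requests
 * for an output, update the enabled-output mask and notify the guest
 * with a display event.
 */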
static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
#ifdef CONFIG_VIRGL
        if (g->renderer_reset) {
            g->renderer_reset = false;
            virtio_gpu_virgl_reset(g);
        }
#endif
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

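/*
 * Migration: the 2D resources, including their guest backing addresses
 * and pixel data, are written as a zero-terminated list, followed by the
 * scanout state.  Loading recreates the images and remaps the backing.
 */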
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     res->iov[i].iov_len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     res->iov[i].iov_len);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                sizeof(struct virtio_gpu_config));

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq   = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

#if defined(CONFIG_VIRGL)
        g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
#else
        g->virtio_config.num_capsets = 0;
#endif
    } else {
        g->ctrl_vq   = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        if (g->renderer_blocked) {
            g->renderer_reset = true;
        } else {
            virtio_gpu_virgl_reset(g);
        }
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme described in docs/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core.  Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                        .name = "virtio-gpu",
                        .get = virtio_gpu_load,
                        .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_BIT("edid", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_EDID_ENABLED, false),
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr)                != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor)           != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref)          != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d)      != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout)             != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush)          != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d)     != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry)               != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info)       != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d)        != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d)      != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create)              != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy)             != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource)            != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit)              != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info)         != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info)        != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset)              != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset)             != 24);
1501