qemu/hw/display/virtio-gpu.c
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

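/*
 * VIRGL() dispatches a call to the virglrenderer-backed implementation
 * when the guest has negotiated 3D mode, and to the simple 2D
 * implementation otherwise.  Without CONFIG_VIRGL only the simple path
 * is compiled in.
 */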
#ifdef CONFIG_VIRGL
#include "virglrenderer.h"
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

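/*
 * Copy cursor pixel data out of a guest 2D resource into the scanout's
 * QEMU cursor.  Bails out silently if the resource does not exist or if
 * its dimensions do not match the current cursor (allocated as 64x64 in
 * update_cursor() below).
 */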
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image)  != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        /* don't leak the cursor data on the size-mismatch path */
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

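/*
 * Handle VIRTIO_GPU_CMD_UPDATE_CURSOR and VIRTIO_GPU_CMD_MOVE_CURSOR on
 * the cursor queue.  An update (re)defines the cursor shape and hotspot;
 * both command types reposition the pointer via dpy_mouse_set().
 */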
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_FEATURE_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_FEATURE_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

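/*
 * Write a response into the command's in-iovec and push it onto the
 * virtqueue.  If the request carried VIRTIO_GPU_FLAG_FENCE, the fence id
 * and context id are propagated into the response header.
 */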
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

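/*
 * Map a virtio-gpu format to a pixman format.  The virtio-gpu names
 * describe byte order in memory, while pixman's 32bpp formats are
 * defined in host-native word order, so the mapping differs between
 * little and big endian hosts.  Returns 0 for unknown formats.
 */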
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

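/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host-side pixman image
 * for the guest resource.  Resource id 0 is reserved and duplicate ids
 * are rejected; pixel data arrives later via attach_backing and
 * transfer_to_host_2d.
 */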
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        /* res is not on reslist yet, free it here to avoid a leak */
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

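/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy pixel data from the guest's
 * backing pages into the host pixman image.  Transfers with zero offset
 * that start at the origin and span the full image width take a single
 * bulk copy; anything else is copied line by line.
 */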
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

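/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: for every scanout showing this
 * resource, intersect the flush rectangle with the scanout rectangle
 * and update only the affected part of that console.
 */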
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

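/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: bind a resource (or none, for resource id
 * 0) to a scanout.  The display surface is recreated only when the
 * backing data pointer or the scanout size actually changed.
 */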
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    g->enable = 1;
    if (ss.resource_id == 0) {
        /* validate the scanout id before indexing g->scanout with it */
        if (ss.scanout_id == 0 ||
            ss.scanout_id >= g->conf.max_outputs) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d\n",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT ||
        ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_from
            (ss.r.width, ss.r.height, format,
             pixman_image_get_stride(res->image),
             (uint8_t *)pixman_image_get_data(res->image) + offset);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

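/*
 * Translate the guest's list of memory entries into an iovec array,
 * mapping each guest-physical region into host address space.  On
 * failure any partial mappings are undone and -1 is returned.
 */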
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

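/*
 * Dispatch a single control command to the matching 2D handler; unknown
 * types yield VIRTIO_GPU_RESP_ERR_UNSPEC.  Handlers that did not already
 * respond get a nodata response (OK, or the recorded error).
 */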
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

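/*
 * Drain the control queue.  Handlers respond synchronously where they
 * can; commands still unfinished afterwards (fenced virgl commands) are
 * parked on fenceq and counted as inflight until the renderer completes
 * them.
 */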
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = g_new(struct virtio_gpu_ctrl_command, 1);
    while (virtqueue_pop(vq, &cmd->elem)) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
            cmd = g_new(struct virtio_gpu_ctrl_command, 1);
        }
    }
    g_free(cmd);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    while (virtqueue_pop(vq, &elem)) {
        s = iov_to_buf(elem.out_sg, elem.out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, &elem, 0);
        virtio_notify(vdev, vq);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
};

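/*
 * Device realize: set up the virtio config space, create the control
 * and cursor queues (the control queue is larger in 3D mode) and
 * register one QEMU console per configured output.
 */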
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq   = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq   = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

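/*
 * Device reset: destroy all resources, clear every scanout and fall
 * back to 2D mode.  Resetting req_state is currently compiled out.
 */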
static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

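/*
 * Compile-time checks that the command structs match the fixed sizes of
 * the virtio-gpu wire format.
 */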
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr)                != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor)           != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref)          != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d)      != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout)             != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush)          != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d)     != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry)               != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info)       != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d)        != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d)      != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create)              != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy)             != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource)            != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit)              != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info)         != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info)        != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset)              != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset)             != 24);