qemu/hw/display/virtio-gpu-virgl.c
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"

#include <virglrenderer.h>

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;

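/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create the host-side resource backing a
 * guest 2D resource.  The hard-coded values below are presumably
 * PIPE_TEXTURE_2D (2) for the target and VIRGL_BIND_RENDER_TARGET (1 << 1)
 * for the bind flags; the original code uses the raw numbers.
 */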
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

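/*
 * VIRTIO_GPU_CMD_RESOURCE_UNREF: detach any guest backing store first, so
 * the iovec mapping is released exactly once, then drop the renderer's
 * reference to the resource.
 */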
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}

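/*
 * VIRTIO_GPU_CMD_CTX_CREATE / CTX_DESTROY: rendering contexts map 1:1 onto
 * virglrenderer contexts, keyed by the ctx_id from the request header.
 */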
static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->parent_obj.scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}

static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}

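/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: bind a resource to a display output.  The
 * resource is looked up in virglrenderer, the QEMU console is resized to the
 * scanout rectangle and pointed at the backing GL texture.  A resource_id of
 * 0 (or an empty rectangle) disables the scanout instead.
 */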
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & 1 /* FIXME: Y_0_TOP */,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height);
    } else {
        dpy_gfx_replace_surface(
            g->parent_obj.scanout[ss.scanout_id].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }
    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}

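/*
 * VIRTIO_GPU_CMD_SUBMIT_3D: copy the command stream out of the request's
 * out_sg (past the header) and hand it to virglrenderer.  cs.size is in
 * bytes, while virgl_renderer_submit_cmd() takes the length in dwords,
 * hence the division by 4.
 */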
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)\n",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

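/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: 2D transfers are expressed to
 * virglrenderer as a depth-1 3D box, with ctx_id, level, stride and
 * layer_stride all passed as 0.
 */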
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

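/*
 * VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: translate the guest's
 * scatter-gather entries into an iovec array and attach it to the renderer
 * resource; if virglrenderer rejects the attach, the mapping is cleaned up
 * again here.
 */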
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    uint32_t res_niov;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
                                        cmd, NULL, &res_iovs, &res_niov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, res_niov);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
    }
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

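/*
 * VIRTIO_GPU_CMD_GET_CAPSET_INFO: the device exposes at most two capability
 * sets, index 0 being VIRTIO_GPU_CAPSET_VIRGL and index 1 being
 * VIRTIO_GPU_CAPSET_VIRGL2; any other index returns a zeroed version/size.
 */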
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

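/*
 * Main control-queue dispatcher for the virgl backend.  Every command runs
 * with virglrenderer context 0 made current.  Commands that neither fail nor
 * carry a fence are answered immediately with OK_NODATA; fenced commands get
 * a renderer fence and are completed later from virgl_write_fence() once the
 * fence signals.
 */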
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

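/*
 * virglrenderer fence callback: retire every queued command whose fence_id
 * is at or below the fence that just signalled, sending its OK_NODATA
 * response and dropping it from the fence queue.
 */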
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * The guest can end up emitting fences out of order, so we
         * check all fenced commands, not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

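/*
 * GL context callbacks: virglrenderer delegates OpenGL context creation,
 * destruction and make-current to QEMU's display layer (dpy_gl_ctx_*),
 * using the console associated with the requested scanout.
 */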
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
                                   qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version             = 1,
    .write_fence         = virgl_write_fence,
    .create_gl_context   = virgl_create_context,
    .destroy_gl_context  = virgl_destroy_context,
    .make_current        = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests     = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d       = 0;
        g->stats.bytes_3d     = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

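/*
 * Fence polling: virgl_renderer_poll() invokes the write_fence callback for
 * any fences that have completed, after which the command queue is processed
 * again.  The 10 ms timer keeps re-arming itself as long as commands or
 * fences are still outstanding.
 */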
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
{
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
    }
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    virgl_renderer_reset();
}

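/*
 * One-time virgl backend setup: initialise virglrenderer with the callback
 * table above, create the fence-poll timer, and, when stats are enabled,
 * start the once-per-second stats dump.
 */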
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;

    ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }
    return 0;
}

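/*
 * Report how many capability sets to advertise: two when virglrenderer
 * knows about the VIRGL2 capset (nonzero max version), otherwise just the
 * base VIRGL capset.
 */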
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
{
    uint32_t capset2_max_ver, capset2_max_size;

    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}