qemu/hw/display/virtio-gpu-3d.c
/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"

#include <virglrenderer.h>

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;

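/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a host resource for a
 * guest 2D resource.  Most of the virgl arguments are hard-coded,
 * since a 2D resource is always a single-level, single-sample texture.
 */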
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;      /* matches gallium's PIPE_TEXTURE_2D */
    args.format = c2d.format;
    args.bind = (1 << 1); /* matches gallium's PIPE_BIND_RENDER_TARGET */
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

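/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_3D: unlike the 2D variant, every
 * resource parameter comes straight from the guest command.
 */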
static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

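/*
 * VIRTIO_GPU_CMD_RESOURCE_UNREF: detach any guest backing pages first,
 * so their mapping can be released before the resource itself goes away.
 */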
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}

static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->parent_obj.scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}

static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}

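/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: associate a resource with a scanout (or
 * disable the scanout when resource_id is 0) and hand the backing GL
 * texture to the display for presentation.
 */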
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & 1 /* FIXME: Y_0_TOP */,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height);
    } else {
        dpy_gfx_replace_surface(
            g->parent_obj.scanout[ss.scanout_id].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }
    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}

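/*
 * VIRTIO_GPU_CMD_SUBMIT_3D: copy the command stream out of the guest's
 * scatter-gather list and pass it to virglrenderer for execution.  The
 * guest gives the size in bytes, while virgl_renderer_submit_cmd()
 * takes a count of 32-bit dwords, hence the division by 4.
 */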
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zu/%d)\n",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

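/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: expressed as a 3D write with a
 * depth-1 box into context 0, level 0.  Stride and layer_stride are
 * left at 0 so virglrenderer falls back to values derived from the
 * box and format.
 */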
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0, /* ctx_id */
                                      0, /* level */
                                      0, /* stride */
                                      0, /* layer_stride */
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

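/*
 * VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: map the guest pages listed in
 * the command into an iovec and hand it to virglrenderer, which keeps
 * the mapping until the backing is detached or the resource is unrefed.
 */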
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, &att_rb, cmd, NULL, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, att_rb.nr_entries);
    }
}

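/*
 * VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: ask virglrenderer for the
 * iovec it holds for this resource, then unmap and free it.
 */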
static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

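/*
 * VIRTIO_GPU_CMD_GET_CAPSET_INFO: report which capability sets exist.
 * Index 0 is the original VIRGL set, index 1 is VIRGL2; any other index
 * gets an all-zero response.
 */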
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

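/*
 * Main dispatch for the virgl path: decode the control header, run the
 * command, and send the response.  Fenced commands are not completed
 * here; a fence is created and the response is deferred until
 * virgl_write_fence() fires.
 */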
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

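/*
 * Fence callback from virglrenderer: complete every queued command
 * whose fence_id is <= the signalled fence.
 */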
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

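/*
 * GL context callbacks handed to virglrenderer: contexts are created on
 * the console belonging to the given scanout and destroyed via scanout
 * 0's console.
 */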
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
                                   qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version             = 1,
    .write_fence         = virgl_write_fence,
    .create_gl_context   = virgl_create_context,
    .destroy_gl_context  = virgl_destroy_context,
    .make_current        = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests     = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d       = 0;
        g->stats.bytes_3d     = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

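/*
 * Poll virglrenderer for completed fences and keep re-arming the timer
 * (every 10 ms) while commands or fences are still outstanding.
 */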
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    int i;

    virgl_renderer_reset();
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
    }
}

int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;

    ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }
    return 0;
}

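/*
 * Probe for the VIRGL2 capset: if virglrenderer reports a non-zero
 * version for it, both capsets are advertised, otherwise only VIRGL.
 */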
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
{
    uint32_t capset2_max_ver, capset2_max_size;

    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}