qemu/hw/display/vhost-user-gpu.c
/*
 * vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2018
 *
 * Authors:
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-gpu.h"
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "migration/blocker.h"

#define VHOST_USER_GPU(obj)                                    \
    OBJECT_CHECK(VhostUserGPU, (obj), TYPE_VHOST_USER_GPU)

typedef enum VhostUserGpuRequest {
    VHOST_USER_GPU_NONE = 0,
    VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_GET_DISPLAY_INFO,
    VHOST_USER_GPU_CURSOR_POS,
    VHOST_USER_GPU_CURSOR_POS_HIDE,
    VHOST_USER_GPU_CURSOR_UPDATE,
    VHOST_USER_GPU_SCANOUT,
    VHOST_USER_GPU_UPDATE,
    VHOST_USER_GPU_DMABUF_SCANOUT,
    VHOST_USER_GPU_DMABUF_UPDATE,
} VhostUserGpuRequest;

typedef struct VhostUserGpuDisplayInfoReply {
    struct virtio_gpu_resp_display_info info;
} VhostUserGpuDisplayInfoReply;

typedef struct VhostUserGpuCursorPos {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
} QEMU_PACKED VhostUserGpuCursorPos;

typedef struct VhostUserGpuCursorUpdate {
    VhostUserGpuCursorPos pos;
    uint32_t hot_x;
    uint32_t hot_y;
    uint32_t data[64 * 64];
} QEMU_PACKED VhostUserGpuCursorUpdate;

typedef struct VhostUserGpuScanout {
    uint32_t scanout_id;
    uint32_t width;
    uint32_t height;
} QEMU_PACKED VhostUserGpuScanout;

typedef struct VhostUserGpuUpdate {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint8_t data[];
} QEMU_PACKED VhostUserGpuUpdate;

typedef struct VhostUserGpuDMABUFScanout {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint32_t fd_width;
    uint32_t fd_height;
    uint32_t fd_stride;
    uint32_t fd_flags;
    int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;

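/*
 * Wire format of a message exchanged with the vhost-user-gpu backend over
 * the dedicated socket: a fixed header (request, flags, size) followed by
 * 'size' bytes of request-specific payload.
 */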
typedef struct VhostUserGpuMsg {
    uint32_t request; /* VhostUserGpuRequest */
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        VhostUserGpuCursorPos cursor_pos;
        VhostUserGpuCursorUpdate cursor_update;
        VhostUserGpuScanout scanout;
        VhostUserGpuUpdate update;
        VhostUserGpuDMABUFScanout dmabuf_scanout;
        struct virtio_gpu_resp_display_info display_info;
        uint64_t u64;
    } payload;
} QEMU_PACKED VhostUserGpuMsg;

static VhostUserGpuMsg m __attribute__ ((unused));
#define VHOST_USER_GPU_HDR_SIZE \
    (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))

#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4

static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);

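/*
 * Cursor requests from the backend: define a new 64x64 cursor image on
 * CURSOR_UPDATE, then move (or hide) the pointer on the targeted scanout.
 */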
static void
vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
    struct virtio_gpu_scanout *s;

    if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[pos->scanout_id];

    if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
        VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = up->hot_x;
        s->current_cursor->hot_y = up->hot_y;

        memcpy(s->current_cursor->data, up->data,
               64 * 64 * sizeof(uint32_t));

        dpy_cursor_define(s->con, s->current_cursor);
    }

    dpy_mouse_set(s->con, pos->x, pos->y,
                  msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
}

static void
vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
{
    qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
                      VHOST_USER_GPU_HDR_SIZE + msg->size);
}

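/*
 * Send the empty reply to a DMABUF_UPDATE request; the backend waits for
 * this acknowledgement before reusing the dma-buf for the next frame.
 */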
static void
vhost_user_gpu_unblock(VhostUserGPU *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_DMABUF_UPDATE,
        .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
    };

    vhost_user_gpu_send_msg(g, &msg);
}

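/*
 * Dispatch display-related requests from the backend: protocol features,
 * display info, scanout configuration and screen updates.
 */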
static void
vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    QemuConsole *con = NULL;
    struct virtio_gpu_scanout *s;

    switch (msg->request) {
    case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(uint64_t),
        };

        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
        break;
    }
    case VHOST_USER_GPU_GET_DISPLAY_INFO: {
        struct virtio_gpu_resp_display_info display_info = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(struct virtio_gpu_resp_display_info),
        };

        display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
        virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
        memcpy(&reply.payload.display_info, &display_info,
               sizeof(display_info));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SCANOUT: {
        VhostUserGpuScanout *m = &msg->payload.scanout;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            return;
        }

        g->parent_obj.enable = 1;
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;

        if (m->scanout_id == 0 && m->width == 0) {
            s->ds = qemu_create_message_surface(640, 480,
                                                "Guest disabled display.");
            dpy_gfx_replace_surface(con, s->ds);
        } else {
            s->ds = qemu_create_displaysurface(m->width, m->height);
            /* replace surface on next update */
        }

        break;
    }
    case VHOST_USER_GPU_DMABUF_SCANOUT: {
        VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
        int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
        QemuDmaBuf *dmabuf;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            if (fd >= 0) {
                close(fd);
            }
            break;
        }

        g->parent_obj.enable = 1;
        con = g->parent_obj.scanout[m->scanout_id].con;
        dmabuf = &g->dmabuf[m->scanout_id];
        if (dmabuf->fd >= 0) {
            close(dmabuf->fd);
            dmabuf->fd = -1;
        }
        if (!console_has_gl_dmabuf(con)) {
            /* it would be nice to report that error earlier */
            error_report("console doesn't support dmabuf!");
            break;
        }
        dpy_gl_release_dmabuf(con, dmabuf);
        if (fd == -1) {
            dpy_gl_scanout_disable(con);
            break;
        }
        *dmabuf = (QemuDmaBuf) {
            .fd = fd,
            .width = m->fd_width,
            .height = m->fd_height,
            .stride = m->fd_stride,
            .fourcc = m->fd_drm_fourcc,
            .y0_top = m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
        };
        dpy_gl_scanout_dmabuf(con, dmabuf);
        break;
    }
    case VHOST_USER_GPU_DMABUF_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
            !g->parent_obj.scanout[m->scanout_id].con) {
            error_report("invalid scanout update: %d", m->scanout_id);
            vhost_user_gpu_unblock(g);
            break;
        }

        con = g->parent_obj.scanout[m->scanout_id].con;
        if (!console_has_gl(con)) {
            error_report("console doesn't support GL!");
            vhost_user_gpu_unblock(g);
            break;
        }
        dpy_gl_update(con, m->x, m->y, m->width, m->height);
        g->backend_blocked = true;
        break;
    }
    case VHOST_USER_GPU_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            break;
        }
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;
        pixman_image_t *image =
            pixman_image_create_bits(PIXMAN_x8r8g8b8,
                                     m->width,
                                     m->height,
                                     (uint32_t *)m->data,
                                     m->width * 4);

        pixman_image_composite(PIXMAN_OP_SRC,
                               image, NULL, s->ds->image,
                               0, 0, 0, 0, m->x, m->y, m->width, m->height);

        pixman_image_unref(image);
        if (qemu_console_surface(con) != s->ds) {
            dpy_gfx_replace_surface(con, s->ds);
        } else {
            dpy_gfx_update(con, m->x, m->y, m->width, m->height);
        }
        break;
    }
    default:
        g_warning("unhandled message %d %d", msg->request, msg->size);
    }

    if (con && qemu_console_is_gl_blocked(con)) {
        vhost_user_gpu_update_blocked(g, true);
    }
}

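/*
 * Read one backend message from the socket: the three header words first,
 * then the payload, and hand it to the cursor or display handler.
 */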
static void
vhost_user_gpu_chr_read(void *opaque)
{
    VhostUserGPU *g = opaque;
    VhostUserGpuMsg *msg = NULL;
    VhostUserGpuRequest request;
    uint32_t size, flags;
    int r;

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&request, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg header: %d, %d", r, errno);
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&flags, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg flags");
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&size, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg size");
        goto end;
    }

    msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);
    g_return_if_fail(msg != NULL);

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&msg->payload, size);
    if (r != size) {
        error_report("failed to read msg payload %d != %d", r, size);
        goto end;
    }

    msg->request = request;
    msg->flags = flags;
    msg->size = size;

    if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
        request == VHOST_USER_GPU_CURSOR_POS ||
        request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
        vhost_user_gpu_handle_cursor(g, msg);
    } else {
        vhost_user_gpu_handle_display(g, msg);
    }

end:
    g_free(msg);
}

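/* Stop or resume reading backend messages by toggling the fd handler. */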
static void
vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
{
    qemu_set_fd_handler(g->vhost_gpu_fd,
                        blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
}

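/*
 * gl_unblock hook from virtio-gpu-base: once the console is done with the
 * GL update, let the backend continue and resume message processing.
 */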
static void
vhost_user_gpu_gl_unblock(VirtIOGPUBase *b)
{
    VhostUserGPU *g = VHOST_USER_GPU(b);

    if (g->backend_blocked) {
        vhost_user_gpu_unblock(VHOST_USER_GPU(g));
        g->backend_blocked = false;
    }

    vhost_user_gpu_update_blocked(VHOST_USER_GPU(g), false);
}

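/*
 * Set up the backend communication channel: a socketpair, with one end
 * wrapped in a socket chardev for QEMU and the other end passed to the
 * backend via VHOST_USER_GPU_SET_SOCKET.
 */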
static bool
vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
{
    Chardev *chr;
    int sv[2];

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_setg_errno(errp, errno, "socketpair() failed");
        return false;
    }

    chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
    if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
        error_setg(errp, "Failed to make socket chardev");
        goto err;
    }
    if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
        goto err;
    }
    if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
        error_setg(errp, "Failed to set vhost-user-gpu socket");
        qemu_chr_fe_deinit(&g->vhost_chr, false);
        goto err;
    }

    g->vhost_gpu_fd = sv[0];
    vhost_user_gpu_update_blocked(g, false);
    close(sv[1]);
    return true;

err:
    close(sv[0]);
    close(sv[1]);
    if (chr) {
        object_unref(OBJECT(chr));
    }
    return false;
}

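/*
 * The config space is owned by the backend; fetch it, then overwrite the
 * fields that QEMU manages itself (scanouts and events).
 */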
static void
vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    struct virtio_gpu_config *vgconfig =
        (struct virtio_gpu_config *)config_data;
    int ret;

    memset(config_data, 0, sizeof(struct virtio_gpu_config));

    ret = vhost_dev_get_config(&g->vhost->dev,
                               config_data, sizeof(struct virtio_gpu_config));
    if (ret) {
        error_report("vhost-user-gpu: get device config space failed");
        return;
    }

    /* those fields are managed by qemu */
    vgconfig->num_scanouts = b->virtio_config.num_scanouts;
    vgconfig->events_read = b->virtio_config.events_read;
    vgconfig->events_clear = b->virtio_config.events_clear;
}

static void
vhost_user_gpu_set_config(VirtIODevice *vdev,
                          const uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config_data;
    int ret;

    if (vgconfig->events_clear) {
        b->virtio_config.events_read &= ~vgconfig->events_clear;
    }

    ret = vhost_dev_set_config(&g->vhost->dev, config_data,
                               0, sizeof(struct virtio_gpu_config),
                               VHOST_SET_CONFIG_TYPE_MASTER);
    if (ret) {
        error_report("vhost-user-gpu: set device config space failed");
        return;
    }
}

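/* Start the backend when the driver is ready, tear it down otherwise. */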
static void
vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    Error *err = NULL;

    if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
        if (!vhost_user_gpu_do_set_socket(g, &err)) {
            error_report_err(err);
            return;
        }
        vhost_user_backend_start(g->vhost);
    } else {
        /* unblock any wait and stop processing */
        if (g->vhost_gpu_fd != -1) {
            vhost_user_gpu_update_blocked(g, true);
            qemu_chr_fe_deinit(&g->vhost_chr, true);
            g->vhost_gpu_fd = -1;
        }
        vhost_user_backend_stop(g->vhost);
    }
}

static bool
vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    return vhost_virtqueue_pending(&g->vhost->dev, idx);
}

static void
vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
}

static void
vhost_user_gpu_instance_init(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
    object_property_add_alias(obj, "chardev",
                              OBJECT(g->vhost), "chardev", &error_abort);
}

static void
vhost_user_gpu_instance_finalize(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    object_unref(OBJECT(g->vhost));
}

static void
vhost_user_gpu_reset(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));

    vhost_user_backend_stop(g->vhost);
}

static int
vhost_user_gpu_config_change(struct vhost_dev *dev)
{
    error_report("vhost-user-gpu: unhandled backend config change");
    return -1;
}

static const VhostDevConfigOps config_ops = {
    .vhost_dev_config_notifier = vhost_user_gpu_config_change,
};

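/*
 * Realize: initialize the vhost-user backend with 2 virtqueues, mirror its
 * VIRGL feature into the base device flags, then realize the virtio-gpu
 * base device.
 */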
static void
vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VhostUserGPU *g = VHOST_USER_GPU(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(g);

    vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
    if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
        return;
    }

    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
    }

    if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
        return;
    }

    g->vhost_gpu_fd = -1;
}

static Property vhost_user_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void
vhost_user_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_unblock = vhost_user_gpu_gl_unblock;

    vdc->realize = vhost_user_gpu_device_realize;
    vdc->reset = vhost_user_gpu_reset;
    vdc->set_status   = vhost_user_gpu_set_status;
    vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
    vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
    vdc->get_config = vhost_user_gpu_get_config;
    vdc->set_config = vhost_user_gpu_set_config;

    dc->props = vhost_user_gpu_properties;
}

static const TypeInfo vhost_user_gpu_info = {
    .name = TYPE_VHOST_USER_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VhostUserGPU),
    .instance_init = vhost_user_gpu_instance_init,
    .instance_finalize = vhost_user_gpu_instance_finalize,
    .class_init = vhost_user_gpu_class_init,
};

static void vhost_user_gpu_register_types(void)
{
    type_register_static(&vhost_user_gpu_info);
}

type_init(vhost_user_gpu_register_types)