qemu/hw/virtio/vhost-user.c
/*
 * vhost-user
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "io/channel-socket.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "trace.h"

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>

#include "standard-headers/linux/vhost_types.h"

#ifdef CONFIG_LINUX
#include <linux/userfaultfd.h>
#endif

#define VHOST_MEMORY_BASELINE_NREGIONS    8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_USER_SLAVE_MAX_FDS     8

/*
 * Set maximum number of RAM slots supported to
 * the maximum number supported by the target
 * hardware platform.
 */
#if defined(TARGET_X86) || defined(TARGET_X86_64) || \
    defined(TARGET_ARM) || defined(TARGET_ARM_64)
#include "hw/acpi/acpi.h"
#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS

#elif defined(TARGET_PPC) || defined(TARGET_PPC_64)
#include "hw/ppc/spapr.h"
#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS

#else
#define VHOST_USER_MAX_RAM_SLOTS 512
#endif

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
    /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE  = 28,
    VHOST_USER_POSTCOPY_LISTEN  = 29,
    VHOST_USER_POSTCOPY_END     = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_MAX
} VhostUserRequest;

typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN    512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN  64

typedef struct VhostUserCryptoSession {
    /* session id for success, -1 on errors */
    int64_t session_id;
    CryptoDevBackendSymSessionInfo session_setup_data;
    uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
    uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
} VhostUserCryptoSession;

static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))

typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;

typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;

typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;

typedef union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserMemRegMsg mem_reg;
        VhostUserLog log;
        struct vhost_iotlb_msg iotlb;
        VhostUserConfig config;
        VhostUserCryptoSession session;
        VhostUserVringArea area;
        VhostUserInflight inflight;
} VhostUserPayload;

typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;

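/*
 * On the wire each message is the packed 12-byte header (request, flags,
 * size) followed by "size" bytes of payload. As an illustrative sketch, a
 * VHOST_USER_SET_VRING_NUM request carries a struct vhost_vring_state:
 *
 *   | request = 8 | flags = 0x1 | size = 8 | state.index | state.num |
 *
 * File descriptors, when a message has any, are passed as SCM_RIGHTS
 * ancillary data on the unix socket rather than inside the payload.
 */
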
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)

struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    QIOChannel *slave_ioc;
    GSource *slave_src;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD  postcopy_fd;
    uint64_t           postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t             region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock         **region_rb;
    /* The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t        *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool               postcopy_listen;

    /* Our current regions */
    int num_shadow_regions;
    struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};

struct scrub_regions {
    struct vhost_memory_region *region;
    int reg_idx;
    int fd_idx;
};

static bool ioeventfd_enabled(void)
{
    return !kvm_enabled() || kvm_eventfds_enabled();
}

static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->hdr.request);
        return -1;
    }

    /* validate received flags */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -1;
    }

    return 0;
}

struct vhost_user_read_cb_data {
    struct vhost_dev *dev;
    VhostUserMsg *msg;
    GMainLoop *loop;
    int ret;
};

static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
                                   gpointer opaque)
{
    struct vhost_user_read_cb_data *data = opaque;
    struct vhost_dev *dev = data->dev;
    VhostUserMsg *msg = data->msg;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size;

    if (vhost_user_read_header(dev, msg) < 0) {
        data->ret = -1;
        goto end;
    }

    /* validate message size is sane */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                " Size %d exceeds the maximum %zu.", msg->hdr.size,
                VHOST_USER_PAYLOAD_SIZE);
        data->ret = -1;
        goto end;
    }

    if (msg->hdr.size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->hdr.size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->hdr.size);
            data->ret = -1;
            goto end;
        }
    }

end:
    g_main_loop_quit(data->loop);
    return G_SOURCE_REMOVE;
}

static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque);

/*
 * This updates the read handler to use a new event loop context.
 * Event sources are removed from the previous context: this ensures
 * that events detected in the previous context are purged. They will
 * be re-detected and processed in the new context.
 */
static void slave_update_read_handler(struct vhost_dev *dev,
                                      GMainContext *ctxt)
{
    struct vhost_user *u = dev->opaque;

    if (!u->slave_ioc) {
        return;
    }

    if (u->slave_src) {
        g_source_destroy(u->slave_src);
        g_source_unref(u->slave_src);
    }

    u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
                                                G_IO_IN | G_IO_HUP,
                                                slave_read, dev, NULL,
                                                ctxt);
}

static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    GMainContext *prev_ctxt = chr->chr->gcontext;
    GMainContext *ctxt = g_main_context_new();
    GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
    struct vhost_user_read_cb_data data = {
        .dev = dev,
        .loop = loop,
        .msg = msg,
        .ret = 0
    };

    /*
     * We want to be able to monitor the slave channel fd while waiting
     * for chr I/O. This requires an event loop, but we can't nest the
     * one to which chr is currently attached: its fd handlers might not
     * be prepared for re-entrancy. So we create a new one and switch chr
     * to use it.
     */
    slave_update_read_handler(dev, ctxt);
    qemu_chr_be_update_read_handlers(chr->chr, ctxt);
    qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);

    g_main_loop_run(loop);

    /*
     * Restore the previous event loop context. This also destroys/recreates
     * event sources: this guarantees that all pending events in the original
     * context that have been processed by the nested loop are purged.
     */
    qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
    slave_update_read_handler(dev, NULL);

    g_main_loop_unref(loop);
    g_main_context_unref(ctxt);

    return data.ret;
}

static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    VhostUserMsg msg_reply;

    if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    if (vhost_user_read(dev, &msg_reply) < 0) {
        return -1;
    }

    if (msg_reply.hdr.request != msg->hdr.request) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->hdr.request, msg_reply.hdr.request);
        return -1;
    }

    return msg_reply.payload.u64 ? -1 : 0;
}

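/*
 * Sketch of how callers drive the REPLY_ACK machinery (mirrors
 * vhost_user_set_mem_table() below): set the need-reply flag before
 * writing, then collect the backend's zero/non-zero status:
 *
 *     if (reply_supported) {
 *         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
 *     }
 *     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
 *         return -1;
 *     }
 *     if (reply_supported) {
 *         return process_message_reply(dev, &msg);
 *     }
 */
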
static bool vhost_user_one_time_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
        return true;
    default:
        return false;
    }
}

/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;

    /*
     * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
     * we only need to send it once, the first time. Any later such
     * request is simply ignored.
     */
    if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -1;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return -1;
    }

    return 0;
}

int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}

static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    bool shmfd = virtio_has_feature(dev->protocol_features,
                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_LOG_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
        .payload.log.mmap_offset = 0,
        .hdr.size = sizeof(msg.payload.log),
    };

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    if (shmfd) {
        msg.hdr.size = 0;
        if (vhost_user_read(dev, &msg) < 0) {
            return -1;
        }

        if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
            error_report("Received unexpected msg type. "
                         "Expected %d received %d",
                         VHOST_USER_SET_LOG_BASE, msg.hdr.request);
            return -1;
        }
    }

    return 0;
}

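/*
 * Illustrative note: with VHOST_USER_PROTOCOL_F_LOG_SHMFD the dirty log is
 * a bitmap in shared memory. vhost_user_set_log_base() above passes the shm
 * file descriptor along with the message, and waiting for the echoed
 * SET_LOG_BASE reply ensures the backend has picked up the new log before
 * QEMU tears down the old one.
 */
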
static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
                                            int *fd)
{
    MemoryRegion *mr;

    assert((uintptr_t)addr == addr);
    mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
    *fd = memory_region_get_fd(mr);

    return mr;
}

static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
                                       struct vhost_memory_region *src,
                                       uint64_t mmap_offset)
{
    assert(src != NULL && dst != NULL);
    dst->userspace_addr = src->userspace_addr;
    dst->memory_size = src->memory_size;
    dst->guest_phys_addr = src->guest_phys_addr;
    dst->mmap_offset = mmap_offset;
}

static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
                                             struct vhost_dev *dev,
                                             VhostUserMsg *msg,
                                             int *fds, size_t *fd_num,
                                             bool track_ramblocks)
{
    int i, fd;
    ram_addr_t offset;
    MemoryRegion *mr;
    struct vhost_memory_region *reg;
    VhostUserMemoryRegion region_buffer;

    msg->hdr.request = VHOST_USER_SET_MEM_TABLE;

    for (i = 0; i < dev->mem->nregions; ++i) {
        reg = dev->mem->regions + i;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            if (track_ramblocks) {
                assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
                trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[i] = offset;
                u->region_rb[i] = mr->ram_block;
            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -1;
            }
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.memory.regions[*fd_num] = region_buffer;
            fds[(*fd_num)++] = fd;
        } else if (track_ramblocks) {
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }
    }

    msg->payload.memory.nregions = *fd_num;

    if (!*fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -1;
    }

    msg->hdr.size = sizeof(msg->payload.memory.nregions);
    msg->hdr.size += sizeof(msg->payload.memory.padding);
    msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);

    return 1;
}

static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
                             struct vhost_memory_region *vdev_reg)
{
    return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
        shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
        shadow_reg->memory_size == vdev_reg->memory_size;
}

static void scrub_shadow_regions(struct vhost_dev *dev,
                                 struct scrub_regions *add_reg,
                                 int *nr_add_reg,
                                 struct scrub_regions *rem_reg,
                                 int *nr_rem_reg, uint64_t *shadow_pcb,
                                 bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
    struct vhost_memory_region *reg, *shadow_reg;
    int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
    ram_addr_t offset;
    MemoryRegion *mr;
    bool matching;

    /*
     * Find memory regions present in our shadow state which are not in
     * the device's current memory state.
     *
     * Mark regions in both the shadow and device state as "found".
     */
    for (i = 0; i < u->num_shadow_regions; i++) {
        shadow_reg = &u->shadow_regions[i];
        matching = false;

        for (j = 0; j < dev->mem->nregions; j++) {
            reg = &dev->mem->regions[j];

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

            if (reg_equal(shadow_reg, reg)) {
                matching = true;
                found[j] = true;
                if (track_ramblocks) {
                    /*
                     * Reset postcopy client bases, region_rb, and
                     * region_rb_offset in case regions are removed.
                     */
                    if (fd > 0) {
                        u->region_rb_offset[j] = offset;
                        u->region_rb[j] = mr->ram_block;
                        shadow_pcb[j] = u->postcopy_client_bases[i];
                    } else {
                        u->region_rb_offset[j] = 0;
                        u->region_rb[j] = NULL;
                    }
                }
                break;
            }
        }

        /*
         * If the region was not found in the current device memory state
         * create an entry for it in the removed list.
         */
        if (!matching) {
            rem_reg[rm_idx].region = shadow_reg;
            rem_reg[rm_idx++].reg_idx = i;
        }
    }

    /*
     * For regions not marked "found", create entries in the added list.
     *
     * Note their indexes in the device memory state and the indexes of their
     * file descriptors.
     */
    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];
        vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            ++fd_num;
        }

        /*
         * If the region was in both the shadow and device state we don't
         * need to send a VHOST_USER_ADD_MEM_REG message for it.
         */
        if (found[i]) {
            continue;
        }

        add_reg[add_idx].region = reg;
        add_reg[add_idx].reg_idx = i;
        add_reg[add_idx++].fd_idx = fd_num;
    }
    *nr_rem_reg = rm_idx;
    *nr_add_reg = add_idx;

    return;
}

static int send_remove_regions(struct vhost_dev *dev,
                               struct scrub_regions *remove_reg,
                               int nr_rem_reg, VhostUserMsg *msg,
                               bool reply_supported)
{
    struct vhost_user *u = dev->opaque;
    struct vhost_memory_region *shadow_reg;
    int i, fd, shadow_reg_idx, ret;
    ram_addr_t offset;
    VhostUserMemoryRegion region_buffer;

    /*
     * The regions in remove_reg appear in the same order they do in the
     * shadow table. Therefore we can minimize memory copies by iterating
     * through remove_reg backwards.
     */
    for (i = nr_rem_reg - 1; i >= 0; i--) {
        shadow_reg = remove_reg[i].region;
        shadow_reg_idx = remove_reg[i].reg_idx;

        vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            msg->hdr.request = VHOST_USER_REM_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
            msg->payload.mem_reg.region = region_buffer;

            if (vhost_user_write(dev, msg, &fd, 1) < 0) {
                return -1;
            }

            if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        }

        /*
         * At this point we know the backend has unmapped the region. It is now
         * safe to remove it from the shadow table.
         */
        memmove(&u->shadow_regions[shadow_reg_idx],
                &u->shadow_regions[shadow_reg_idx + 1],
                sizeof(struct vhost_memory_region) *
                (u->num_shadow_regions - shadow_reg_idx - 1));
        u->num_shadow_regions--;
    }

    return 0;
}

static int send_add_regions(struct vhost_dev *dev,
                            struct scrub_regions *add_reg, int nr_add_reg,
                            VhostUserMsg *msg, uint64_t *shadow_pcb,
                            bool reply_supported, bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    int i, fd, ret, reg_idx, reg_fd_idx;
    struct vhost_memory_region *reg;
    MemoryRegion *mr;
    ram_addr_t offset;
    VhostUserMsg msg_reply;
    VhostUserMemoryRegion region_buffer;

    for (i = 0; i < nr_add_reg; i++) {
        reg = add_reg[i].region;
        reg_idx = add_reg[i].reg_idx;
        reg_fd_idx = add_reg[i].fd_idx;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            if (track_ramblocks) {
                trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[reg_idx] = offset;
                u->region_rb[reg_idx] = mr->ram_block;
            }
            msg->hdr.request = VHOST_USER_ADD_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.mem_reg.region = region_buffer;

            if (vhost_user_write(dev, msg, &fd, 1) < 0) {
                return -1;
            }

            if (track_ramblocks) {
                uint64_t reply_gpa;

                if (vhost_user_read(dev, &msg_reply) < 0) {
                    return -1;
                }

                reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;

                if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
                    error_report("%s: Received unexpected msg type. "
                                 "Expected %d received %d", __func__,
                                 VHOST_USER_ADD_MEM_REG,
                                 msg_reply.hdr.request);
                    return -1;
                }

                /*
                 * We're using the same structure, just reusing one of the
                 * fields, so it should be the same size.
                 */
                if (msg_reply.hdr.size != msg->hdr.size) {
                    error_report("%s: Unexpected size for postcopy reply "
                                 "%d vs %d", __func__, msg_reply.hdr.size,
                                 msg->hdr.size);
                    return -1;
                }

                /* Get the postcopy client base from the backend's reply. */
                if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
                    shadow_pcb[reg_idx] =
                        msg_reply.payload.mem_reg.region.userspace_addr;
                    trace_vhost_user_set_mem_table_postcopy(
                        msg_reply.payload.mem_reg.region.userspace_addr,
                        msg->payload.mem_reg.region.userspace_addr,
                        reg_fd_idx, reg_idx);
                } else {
                    error_report("%s: invalid postcopy reply for region. "
                                 "Got guest physical address %" PRIX64 ", expected "
                                 "%" PRIX64, __func__, reply_gpa,
                                 dev->mem->regions[reg_idx].guest_phys_addr);
                    return -1;
                }
            } else if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        } else if (track_ramblocks) {
            u->region_rb_offset[reg_idx] = 0;
            u->region_rb[reg_idx] = NULL;
        }

        /*
         * At this point, we know the backend has mapped in the new
         * region, if the region has a valid file descriptor.
         *
         * The region should now be added to the shadow table.
         */
        u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
            reg->guest_phys_addr;
        u->shadow_regions[u->num_shadow_regions].userspace_addr =
            reg->userspace_addr;
        u->shadow_regions[u->num_shadow_regions].memory_size =
            reg->memory_size;
        u->num_shadow_regions++;
    }

    return 0;
}

static int vhost_user_add_remove_regions(struct vhost_dev *dev,
                                         VhostUserMsg *msg,
                                         bool reply_supported,
                                         bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
    struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
    uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
    int nr_add_reg, nr_rem_reg;

    msg->hdr.size = sizeof(msg->payload.mem_reg);

    /* Find the regions which need to be removed or added. */
    scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
                         shadow_pcb, track_ramblocks);

    if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
                reply_supported) < 0)
    {
        goto err;
    }

    if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg,
                shadow_pcb, reply_supported, track_ramblocks) < 0)
    {
        goto err;
    }

    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal with
         * any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg->hdr.size = sizeof(msg->payload.u64);
        msg->payload.u64 = 0; /* OK */

        if (vhost_user_write(dev, msg, NULL, 0) < 0) {
            return -1;
        }
    }

    return 0;

err:
    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
    }

    return -1;
}

static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                             struct vhost_memory *mem,
                                             bool reply_supported,
                                             bool config_mem_slots)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg_reply;
    int region_i, msg_i;

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (u->region_rb_len < dev->mem->nregions) {
        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
                                      dev->mem->nregions);
        memset(&(u->region_rb[u->region_rb_len]), '\0',
               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
        u->region_rb_len = dev->mem->nregions;
    }

    if (config_mem_slots) {
        if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
                                          true) < 0) {
            return -1;
        }
    } else {
        if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                              true) < 0) {
            return -1;
        }

        if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
            return -1;
        }

        if (vhost_user_read(dev, &msg_reply) < 0) {
            return -1;
        }

        if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
            error_report("%s: Received unexpected msg type. "
                         "Expected %d received %d", __func__,
                         VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
            return -1;
        }

        /*
         * We're using the same structure, just reusing one of the
         * fields, so it should be the same size.
         */
        if (msg_reply.hdr.size != msg.hdr.size) {
            error_report("%s: Unexpected size for postcopy reply "
                         "%d vs %d", __func__, msg_reply.hdr.size,
                         msg.hdr.size);
            return -1;
        }

        memset(u->postcopy_client_bases, 0,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);

        /*
         * They're in the same order as the regions that were sent
         * but some of the regions were skipped (above) if they
         * didn't have fd's
         */
        for (msg_i = 0, region_i = 0;
             region_i < dev->mem->nregions;
             region_i++) {
            if (msg_i < fd_num &&
                msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
                dev->mem->regions[region_i].guest_phys_addr) {
                u->postcopy_client_bases[region_i] =
                    msg_reply.payload.memory.regions[msg_i].userspace_addr;
                trace_vhost_user_set_mem_table_postcopy(
                    msg_reply.payload.memory.regions[msg_i].userspace_addr,
                    msg.payload.memory.regions[msg_i].userspace_addr,
                    msg_i, region_i);
                msg_i++;
            }
        }
        if (msg_i != fd_num) {
            error_report("%s: postcopy reply not fully consumed "
                         "%d vs %zd",
                         __func__, msg_i, fd_num);
            return -1;
        }

        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal
         * with any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg.hdr.size = sizeof(msg.payload.u64);
        msg.payload.u64 = 0; /* OK */
        if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
            return -1;
        }
    }

    return 0;
}

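/*
 * Orientation sketch: memory table updates below take one of three shapes.
 * With VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS the shadow table is
 * reconciled incrementally via ADD/REM_MEM_REG messages; otherwise a full
 * SET_MEM_TABLE snapshot is sent with one fd per region. During postcopy
 * listen the _postcopy variant above additionally records RAMBlocks and
 * the client's base addresses for fault handling.
 */
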
static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    bool config_mem_slots =
        virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);

    if (do_postcopy) {
        /*
         * Postcopy has enough differences that it's best done in its own
         * version
         */
        return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
                                                 config_mem_slots);
    }

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (config_mem_slots) {
        if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
                                          false) < 0) {
            return -1;
        }
    } else {
        if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                              false) < 0) {
            return -1;
        }
        if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
            return -1;
        }

        if (reply_supported) {
            return process_message_reply(dev, &msg);
        }
    }

    return 0;
}

static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ADDR,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .hdr.size = sizeof(msg.payload.addr),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_endian(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    bool cross_endian = virtio_has_feature(dev->protocol_features,
                                           VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (!cross_endian) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}

static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
                                             int queue_idx)
{
    struct vhost_user *u = dev->opaque;
    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
    VirtIODevice *vdev = dev->vdev;

    if (n->addr && !n->set) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
        n->set = true;
    }
}

static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
                                            int queue_idx)
{
    struct vhost_user *u = dev->opaque;
    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
    VirtIODevice *vdev = dev->vdev;

    if (n->addr && n->set) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        n->set = false;
    }
}

static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    vhost_user_host_notifier_restore(dev, ring->index);

    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}

static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
{
    int i;

    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        return -1;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num   = enable,
        };

        vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
    }

    return 0;
}

static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    vhost_user_host_notifier_remove(dev, ring->index);

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *ring = msg.payload.state;

    return 0;
}

static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (ioeventfd_enabled() && file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    return 0;
}

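/*
 * Illustrative note: when vhost_set_vring_file() above sends no descriptor,
 * VHOST_USER_VRING_NOFD_MASK in the payload tells the backend the fd is
 * absent, so it can fall back to polling the vring instead of waiting on
 * a kick eventfd.
 */
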
static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
}

static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
}

static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *u64 = msg.payload.u64;

    return 0;
}

static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
        return -EPROTO;
    }

    return 0;
}

static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_OWNER,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -EPROTO;
    }

    return 0;
}

static int vhost_user_get_max_memslots(struct vhost_dev *dev,
                                       uint64_t *max_memslots)
{
    uint64_t backend_max_memslots;
    int err;

    err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
                             &backend_max_memslots);
    if (err < 0) {
        return err;
    }

    *max_memslots = backend_max_memslots;

    return 0;
}

static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    msg.hdr.request = virtio_has_feature(dev->protocol_features,
                                         VHOST_USER_PROTOCOL_F_RESET_DEVICE)
        ? VHOST_USER_RESET_DEVICE
        : VHOST_USER_RESET_OWNER;

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}

static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
{
    int ret = -1;

    if (!dev->config_ops) {
        return -1;
    }

    if (dev->config_ops->vhost_dev_config_notifier) {
        ret = dev->config_ops->vhost_dev_config_notifier(dev);
    }

    return ret;
}

static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                       VhostUserVringArea *area,
                                                       int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size;
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -1;
    }

    n = &user->notifier[queue_idx];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }

    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -1;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -1;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        munmap(addr, page_size);
        return -1;
    }

    n->addr = addr;
    n->set = true;

    return 0;
}

static void close_slave_channel(struct vhost_user *u)
{
    g_source_destroy(u->slave_src);
    g_source_unref(u->slave_src);
    u->slave_src = NULL;
    object_unref(OBJECT(u->slave_ioc));
    u->slave_ioc = NULL;
}

static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_user *u = dev->opaque;
    VhostUserHeader hdr = { 0, };
    VhostUserPayload payload = { 0, };
    Error *local_err = NULL;
    gboolean rc = G_SOURCE_CONTINUE;
    int ret = 0;
    struct iovec iov;
    g_autofree int *fd = NULL;
    size_t fdsize = 0;
    int i;

    /* Read header */
    iov.iov_base = &hdr;
    iov.iov_len = VHOST_USER_HDR_SIZE;

    if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                " Size %d exceeds the maximum %zu.", hdr.size,
                VHOST_USER_PAYLOAD_SIZE);
        goto err;
    }

    /* Read payload */
    if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    switch (hdr.request) {
    case VHOST_USER_SLAVE_IOTLB_MSG:
        ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
        break;
    case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
        ret = vhost_user_slave_handle_config_change(dev);
        break;
    case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
        ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
                                                          fd ? fd[0] : -1);
        break;
    default:
        error_report("Received unexpected msg type: %d.", hdr.request);
        ret = -EINVAL;
    }

    /*
     * REPLY_ACK feature handling. Other reply types have to be managed
     * directly in their request handlers.
     */
    if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        struct iovec iovec[2];

        hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        hdr.flags |= VHOST_USER_REPLY_MASK;

        payload.u64 = !!ret;
        hdr.size = sizeof(payload.u64);

        iovec[0].iov_base = &hdr;
        iovec[0].iov_len = VHOST_USER_HDR_SIZE;
        iovec[1].iov_base = &payload;
        iovec[1].iov_len = hdr.size;

        if (qio_channel_writev_all(ioc, iovec, ARRAY_SIZE(iovec), &local_err)) {
            error_report_err(local_err);
            goto err;
        }
    }

    goto fdcleanup;

err:
    close_slave_channel(u);
    rc = G_SOURCE_REMOVE;

fdcleanup:
    if (fd) {
        for (i = 0; i < fdsize; i++) {
            close(fd[i]);
        }
    }
    return rc;
}

static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
        .hdr.flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    Error *local_err = NULL;
    QIOChannel *ioc;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        return 0;
    }

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_report("socketpair() failed");
        return -1;
    }

    ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
    if (!ioc) {
        error_report_err(local_err);
        return -1;
    }
    u->slave_ioc = ioc;
    slave_update_read_handler(dev, NULL);

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    close(sv[1]);
    if (ret) {
        close_slave_channel(u);
    }

    return ret;
}

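/*
 * Illustrative note on the slave channel set up above: QEMU keeps sv[0]
 * and hands sv[1] to the backend via VHOST_USER_SET_SLAVE_REQ_FD, giving
 * the backend a reverse direction for requests of its own (IOTLB misses,
 * config-change and host-notifier messages), which slave_read() services.
 */
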
#ifdef CONFIG_LINUX
/*
 * Called back from the postcopy fault thread when a fault is received on our
 * ufd.
 * TODO: This is Linux specific
 */
static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
                                             void *ufd)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    struct uffd_msg *msg = ufd;
    uint64_t faultaddr = msg->arg.pagefault.address;
    RAMBlock *rb = NULL;
    uint64_t rb_offset;
    int i;

    trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
                                            dev->mem->nregions);
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        trace_vhost_user_postcopy_fault_handler_loop(i,
                u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
        if (faultaddr >= u->postcopy_client_bases[i]) {
1665            /* Offset of the fault address in the vhost region */
1666            uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
1667            if (region_offset < dev->mem->regions[i].memory_size) {
1668                rb_offset = region_offset + u->region_rb_offset[i];
1669                trace_vhost_user_postcopy_fault_handler_found(i,
1670                        region_offset, rb_offset);
1671                rb = u->region_rb[i];
1672                return postcopy_request_shared_page(pcfd, rb, faultaddr,
1673                                                    rb_offset);
1674            }
1675        }
1676    }
1677    error_report("%s: Failed to find region for fault %" PRIx64,
1678                 __func__, faultaddr);
1679    return -1;
1680}
1681
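/*
 * Called when a page has arrived: translate the RAMBlock offset back into
 * the backend's (client's) address space and wake anyone blocked on it
 * via the shared userfault fd.
 */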
1682static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
1683                                     uint64_t offset)
1684{
1685    struct vhost_dev *dev = pcfd->data;
1686    struct vhost_user *u = dev->opaque;
1687    int i;
1688
1689    trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);
1690
1691    if (!u) {
1692        return 0;
1693    }
1694    /* Translate the offset into an address in the client's address space */
1695    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1696        if (u->region_rb[i] == rb &&
1697            offset >= u->region_rb_offset[i] &&
1698            offset < (u->region_rb_offset[i] +
1699                      dev->mem->regions[i].memory_size)) {
1700            uint64_t client_addr = (offset - u->region_rb_offset[i]) +
1701                                   u->postcopy_client_bases[i];
1702            trace_vhost_user_postcopy_waker_found(client_addr);
1703            return postcopy_wake_shared(pcfd, client_addr, rb);
1704        }
1705    }
1706
1707    trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
1708    return 0;
1709}
1710#endif
1711
1712/*
1713 * Called at the start of an inbound postcopy on reception of the
1714 * 'advise' command.
1715 */
1716static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
1717{
1718#ifdef CONFIG_LINUX
1719    struct vhost_user *u = dev->opaque;
1720    CharBackend *chr = u->user->chr;
1721    int ufd;
1722    VhostUserMsg msg = {
1723        .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
1724        .hdr.flags = VHOST_USER_VERSION,
1725    };
1726
1727    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1728        error_setg(errp, "Failed to send postcopy_advise to vhost");
1729        return -1;
1730    }
1731
1732    if (vhost_user_read(dev, &msg) < 0) {
1733        error_setg(errp, "Failed to get postcopy_advise reply from vhost");
1734        return -1;
1735    }
1736
1737    if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
1738        error_setg(errp, "Unexpected msg type. Expected %d, received %d",
1739                   VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
1740        return -1;
1741    }
1742
1743    if (msg.hdr.size) {
1744        error_setg(errp, "Received bad msg size.");
1745        return -1;
1746    }
1747    ufd = qemu_chr_fe_get_msgfd(chr);
1748    if (ufd < 0) {
1749        error_setg(errp, "%s: Failed to get ufd", __func__);
1750        return -1;
1751    }
1752    qemu_set_nonblock(ufd);
1753
1754    /* register ufd with userfault thread */
1755    u->postcopy_fd.fd = ufd;
1756    u->postcopy_fd.data = dev;
1757    u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
1758    u->postcopy_fd.waker = vhost_user_postcopy_waker;
1759    u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
1760    postcopy_register_shared_ufd(&u->postcopy_fd);
1761    return 0;
1762#else
1763    error_setg(errp, "Postcopy not supported on non-Linux systems");
1764    return -1;
1765#endif
1766}
1767
1768/*
1769 * Called at the switch to postcopy on reception of the 'listen' command.
1770 */
1771static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
1772{
1773    struct vhost_user *u = dev->opaque;
1774    int ret;
1775    VhostUserMsg msg = {
1776        .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
1777        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1778    };
1779    u->postcopy_listen = true;
1780    trace_vhost_user_postcopy_listen();
1781    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1782        error_setg(errp, "Failed to send postcopy_listen to vhost");
1783        return -1;
1784    }
1785
1786    ret = process_message_reply(dev, &msg);
1787    if (ret) {
1788        error_setg(errp, "Failed to receive reply to postcopy_listen");
1789        return ret;
1790    }
1791
1792    return 0;
1793}
1794
1795/*
1796 * Called at the end of postcopy
1797 */
1798static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
1799{
1800    VhostUserMsg msg = {
1801        .hdr.request = VHOST_USER_POSTCOPY_END,
1802        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1803    };
1804    int ret;
1805    struct vhost_user *u = dev->opaque;
1806
1807    trace_vhost_user_postcopy_end_entry();
1808    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1809        error_setg(errp, "Failed to send postcopy_end to vhost");
1810        return -1;
1811    }
1812
1813    ret = process_message_reply(dev, &msg);
1814    if (ret) {
1815        error_setg(errp, "Failed to receive reply to postcopy_end");
1816        return ret;
1817    }
1818    postcopy_unregister_shared_ufd(&u->postcopy_fd);
1819    close(u->postcopy_fd.fd);
1820    u->postcopy_fd.handler = NULL;
1821
1822    trace_vhost_user_postcopy_end_exit();
1823
1824    return 0;
1825}
1826
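/*
 * Dispatch postcopy notifications: reject backends without
 * VHOST_USER_PROTOCOL_F_PAGEFAULT at PROBE time and forward the
 * advise/listen/end transitions to the handlers above.
 */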
1827static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
1828                                        void *opaque)
1829{
1830    struct PostcopyNotifyData *pnd = opaque;
1831    struct vhost_user *u = container_of(notifier, struct vhost_user,
1832                                         postcopy_notifier);
1833    struct vhost_dev *dev = u->dev;
1834
1835    switch (pnd->reason) {
1836    case POSTCOPY_NOTIFY_PROBE:
1837        if (!virtio_has_feature(dev->protocol_features,
1838                                VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
1839            /* TODO: Get the device name into this error somehow */
1840            error_setg(pnd->errp,
1841                       "vhost-user backend not capable of postcopy");
1842            return -ENOENT;
1843        }
1844        break;
1845
1846    case POSTCOPY_NOTIFY_INBOUND_ADVISE:
1847        return vhost_user_postcopy_advise(dev, pnd->errp);
1848
1849    case POSTCOPY_NOTIFY_INBOUND_LISTEN:
1850        return vhost_user_postcopy_listen(dev, pnd->errp);
1851
1852    case POSTCOPY_NOTIFY_INBOUND_END:
1853        return vhost_user_postcopy_end(dev, pnd->errp);
1854
1855    default:
1856        /* Ignore notifications we don't know about */
1857        break;
1858    }
1859
1860    return 0;
1861}
1862
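/*
 * Backend init: allocate the per-device vhost_user state and negotiate
 * features, protocol features, queue and RAM-slot limits with the
 * backend, then wire up the slave channel and the postcopy notifier.
 */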
1863static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
1864                                   Error **errp)
1865{
1866    uint64_t features, protocol_features, ram_slots;
1867    struct vhost_user *u;
1868    int err;
1869
1870    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
1871
1872    u = g_new0(struct vhost_user, 1);
1873    u->user = opaque;
1874    u->dev = dev;
1875    dev->opaque = u;
1876
1877    err = vhost_user_get_features(dev, &features);
1878    if (err < 0) {
1879        return err;
1880    }
1881
1882    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
1883        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
1884
1885        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
1886                                 &protocol_features);
1887        if (err < 0) {
1888            return -EPROTO;
1889        }
1890
1891        dev->protocol_features =
1892            protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;
1893
1894        if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
1895            /* Don't acknowledge CONFIG feature if device doesn't support it */
1896            dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
1897        } else if (!(protocol_features &
1898                    (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) {
1899            error_setg(errp, "Device expects VHOST_USER_PROTOCOL_F_CONFIG "
1900                       "but backend does not support it.");
1901            return -EINVAL;
1902        }
1903
1904        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
1905        if (err < 0) {
1906            return -EPROTO;
1907        }
1908
1909        /* Query the max queues we support if the backend supports MQ */
1910        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
1911            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
1912                                     &dev->max_queues);
1913            if (err < 0) {
1914                return -EPROTO;
1915            }
1916        } else {
1917            dev->max_queues = 1;
1918        }
1919
1920        if (dev->num_queues && dev->max_queues < dev->num_queues) {
1921            error_setg(errp, "The maximum number of queues supported by the "
1922                       "backend is %" PRIu64, dev->max_queues);
1923            return -EINVAL;
1924        }
1925
1926        if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
1927                !(virtio_has_feature(dev->protocol_features,
1928                    VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
1929                 virtio_has_feature(dev->protocol_features,
1930                    VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
1931            error_setg(errp, "IOMMU support requires reply-ack and "
1932                       "slave-req protocol features.");
1933            return -EINVAL;
1934        }
1935
1936        /* get max memory regions if backend supports configurable RAM slots */
1937        if (!virtio_has_feature(dev->protocol_features,
1938                                VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
1939            u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
1940        } else {
1941            err = vhost_user_get_max_memslots(dev, &ram_slots);
1942            if (err < 0) {
1943                return -EPROTO;
1944            }
1945
1946            if (ram_slots < u->user->memory_slots) {
1947                error_setg(errp, "The backend specified a max ram slots limit "
1948                           "of %" PRIu64", when the prior validated limit was "
1949                           "%d. This limit should never decrease.", ram_slots,
1950                           u->user->memory_slots);
1951                return -EINVAL;
1952            }
1953
1954            u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
1955        }
1956    }
1957
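    /*
     * Without VHOST_USER_PROTOCOL_F_LOG_SHMFD the backend cannot share a
     * dirty log with QEMU, so register a migration blocker.
     */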
1958    if (dev->migration_blocker == NULL &&
1959        !virtio_has_feature(dev->protocol_features,
1960                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
1961        error_setg(&dev->migration_blocker,
1962                   "Migration disabled: vhost-user backend lacks "
1963                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
1964    }
1965
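    /* The slave channel is per device; set it up once, for vq_index 0. */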
1966    if (dev->vq_index == 0) {
1967        err = vhost_setup_slave_channel(dev);
1968        if (err < 0) {
1969            return -EPROTO;
1970        }
1971    }
1972
1973    u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
1974    postcopy_add_notifier(&u->postcopy_notifier);
1975
1976    return 0;
1977}
1978
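/*
 * Backend cleanup: undo vhost_user_backend_init(), tearing down the
 * postcopy notifier and fd, the slave channel and the region-tracking
 * arrays before freeing the vhost_user state.
 */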
1979static int vhost_user_backend_cleanup(struct vhost_dev *dev)
1980{
1981    struct vhost_user *u;
1982
1983    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
1984
1985    u = dev->opaque;
1986    if (u->postcopy_notifier.notify) {
1987        postcopy_remove_notifier(&u->postcopy_notifier);
1988        u->postcopy_notifier.notify = NULL;
1989    }
1990    u->postcopy_listen = false;
1991    if (u->postcopy_fd.handler) {
1992        postcopy_unregister_shared_ufd(&u->postcopy_fd);
1993        close(u->postcopy_fd.fd);
1994        u->postcopy_fd.handler = NULL;
1995    }
1996    if (u->slave_ioc) {
1997        close_slave_channel(u);
1998    }
1999    g_free(u->region_rb);
2000    u->region_rb = NULL;
2001    g_free(u->region_rb_offset);
2002    u->region_rb_offset = NULL;
2003    u->region_rb_len = 0;
2004    g_free(u);
2005    dev->opaque = NULL;
2006
2007    return 0;
2008}
2009
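/* vhost-user vq indices are global across the device, so map 1:1. */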
2010static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
2011{
2012    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
2013
2014    return idx;
2015}
2016
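/* Report the RAM-slot limit negotiated in vhost_user_backend_init(). */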
2017static int vhost_user_memslots_limit(struct vhost_dev *dev)
2018{
2019    struct vhost_user *u = dev->opaque;
2020
2021    return u->user->memory_slots;
2022}
2023
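/*
 * A shared-memory dirty log is required iff the backend negotiated
 * VHOST_USER_PROTOCOL_F_LOG_SHMFD.
 */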
2024static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
2025{
2026    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2027
2028    return virtio_has_feature(dev->protocol_features,
2029                              VHOST_USER_PROTOCOL_F_LOG_SHMFD);
2030}
2031
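/*
 * Make the destination guest visible on the network after migration:
 * prefer in-guest VIRTIO_NET_F_GUEST_ANNOUNCE; otherwise ask a
 * RARP-capable backend to announce on the guest's behalf.
 */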
2032static int vhost_user_migration_done(struct vhost_dev *dev, char *mac_addr)
2033{
2034    VhostUserMsg msg = { };
2035
2036    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2037
2038    /* If the guest supports GUEST_ANNOUNCE, do nothing */
2039    if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
2040        return 0;
2041    }
2042
2043    /* Ask a VHOST_USER_PROTOCOL_F_RARP-capable backend to send the RARP */
2044    if (virtio_has_feature(dev->protocol_features,
2045                           VHOST_USER_PROTOCOL_F_RARP)) {
2046        msg.hdr.request = VHOST_USER_SEND_RARP;
2047        msg.hdr.flags = VHOST_USER_VERSION;
2048        memcpy((char *)&msg.payload.u64, mac_addr, 6);
2049        msg.hdr.size = sizeof(msg.payload.u64);
2050
2051        return vhost_user_write(dev, &msg, NULL, 0);
2052    }
2053    return -1;
2054}
2055
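/*
 * Adjacent regions may be merged into a single vhost-user region only
 * when they are backed by the same file descriptor.
 */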
2056static bool vhost_user_can_merge(struct vhost_dev *dev,
2057                                 uint64_t start1, uint64_t size1,
2058                                 uint64_t start2, uint64_t size2)
2059{
2060    ram_addr_t offset;
2061    int mfd, rfd;
2062
2063    (void)vhost_user_get_mr_data(start1, &offset, &mfd);
2064    (void)vhost_user_get_mr_data(start2, &offset, &rfd);
2065
2066    return mfd == rfd;
2067}
2068
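/*
 * Push the configured MTU to a backend advertising
 * VHOST_USER_PROTOCOL_F_NET_MTU; with REPLY_ACK the backend must confirm
 * the MTU is acceptable.
 */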
2069static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
2070{
2071    VhostUserMsg msg;
2072    bool reply_supported = virtio_has_feature(dev->protocol_features,
2073                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
2074
2075    if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
2076        return 0;
2077    }
2078
2079    msg.hdr.request = VHOST_USER_NET_SET_MTU;
2080    msg.payload.u64 = mtu;
2081    msg.hdr.size = sizeof(msg.payload.u64);
2082    msg.hdr.flags = VHOST_USER_VERSION;
2083    if (reply_supported) {
2084        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
2085    }
2086
2087    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2088        return -1;
2089    }
2090
2091    /* If reply_ack is supported, the slave must ack that the MTU is valid */
2092    if (reply_supported) {
2093        return process_message_reply(dev, &msg);
2094    }
2095
2096    return 0;
2097}
2098
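/*
 * Forward a device IOTLB message to the backend; these always carry
 * VHOST_USER_NEED_REPLY_MASK, so the backend's ack is awaited.
 */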
2099static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
2100                                            struct vhost_iotlb_msg *imsg)
2101{
2102    VhostUserMsg msg = {
2103        .hdr.request = VHOST_USER_IOTLB_MSG,
2104        .hdr.size = sizeof(msg.payload.iotlb),
2105        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
2106        .payload.iotlb = *imsg,
2107    };
2108
2109    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2110        return -EFAULT;
2111    }
2112
2113    return process_message_reply(dev, &msg);
2114}
2115
2117static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
2118{
2119    /* No-op as the receive channel is not dedicated to IOTLB messages. */
2120}
2121
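/*
 * Read config_len bytes of the device config space from the backend.
 * Requires VHOST_USER_PROTOCOL_F_CONFIG; the reply size is validated
 * before the data is copied out.
 */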
2122static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
2123                                 uint32_t config_len, Error **errp)
2124{
2125    VhostUserMsg msg = {
2126        .hdr.request = VHOST_USER_GET_CONFIG,
2127        .hdr.flags = VHOST_USER_VERSION,
2128        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
2129    };
2130
2131    if (!virtio_has_feature(dev->protocol_features,
2132                VHOST_USER_PROTOCOL_F_CONFIG)) {
2133        error_setg(errp, "VHOST_USER_PROTOCOL_F_CONFIG not supported");
2134        return -EINVAL;
2135    }
2136
2137    assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);
2138
2139    msg.payload.config.offset = 0;
2140    msg.payload.config.size = config_len;
2141    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2142        return -EPROTO;
2143    }
2144
2145    if (vhost_user_read(dev, &msg) < 0) {
2146        return -EPROTO;
2147    }
2148
2149    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
2150        error_setg(errp,
2151                   "Received unexpected msg type. Expected %d, received %d",
2152                   VHOST_USER_GET_CONFIG, msg.hdr.request);
2153        return -EINVAL;
2154    }
2155
2156    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
2157        error_setg(errp, "Received bad msg size.");
2158        return -EINVAL;
2159    }
2160
2161    memcpy(config, msg.payload.config.region, config_len);
2162
2163    return 0;
2164}
2165
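/*
 * Write 'size' bytes at 'offset' of the device config space. With
 * REPLY_ACK negotiated, the backend must acknowledge the update.
 */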
2166static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
2167                                 uint32_t offset, uint32_t size, uint32_t flags)
2168{
2169    uint8_t *p;
2170    bool reply_supported = virtio_has_feature(dev->protocol_features,
2171                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
2172
2173    VhostUserMsg msg = {
2174        .hdr.request = VHOST_USER_SET_CONFIG,
2175        .hdr.flags = VHOST_USER_VERSION,
2176        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
2177    };
2178
2179    if (!virtio_has_feature(dev->protocol_features,
2180                VHOST_USER_PROTOCOL_F_CONFIG)) {
2181        return -1;
2182    }
2183
2184    if (reply_supported) {
2185        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
2186    }
2187
2188    if (size > VHOST_USER_MAX_CONFIG_SIZE) {
2189        return -1;
2190    }
2191
2192    msg.payload.config.offset = offset;
2193    msg.payload.config.size = size;
2194    msg.payload.config.flags = flags;
2195    p = msg.payload.config.region;
2196    memcpy(p, data, size);
2197
2198    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2199        return -1;
2200    }
2201
2202    if (reply_supported) {
2203        return process_message_reply(dev, &msg);
2204    }
2205
2206    return 0;
2207}
2208
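/*
 * Create a crypto session on the backend: send the symmetric session
 * parameters plus any cipher/auth key material and return the session
 * id allocated by the backend.
 */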
2209static int vhost_user_crypto_create_session(struct vhost_dev *dev,
2210                                            void *session_info,
2211                                            uint64_t *session_id)
2212{
2213    bool crypto_session = virtio_has_feature(dev->protocol_features,
2214                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2215    CryptoDevBackendSymSessionInfo *sess_info = session_info;
2216    VhostUserMsg msg = {
2217        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
2218        .hdr.flags = VHOST_USER_VERSION,
2219        .hdr.size = sizeof(msg.payload.session),
2220    };
2221
2222    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2223
2224    if (!crypto_session) {
2225        error_report("vhost-user trying to send unhandled ioctl");
2226        return -1;
2227    }
2228
2229    memcpy(&msg.payload.session.session_setup_data, sess_info,
2230              sizeof(CryptoDevBackendSymSessionInfo));
2231    if (sess_info->key_len) {
2232        memcpy(&msg.payload.session.key, sess_info->cipher_key,
2233               sess_info->key_len);
2234    }
2235    if (sess_info->auth_key_len > 0) {
2236        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
2237               sess_info->auth_key_len);
2238    }
2239    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2240        error_report("vhost_user_write() returned -1, create session failed");
2241        return -1;
2242    }
2243
2244    if (vhost_user_read(dev, &msg) < 0) {
2245        error_report("vhost_user_read() returned -1, create session failed");
2246        return -1;
2247    }
2248
2249    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
2250        error_report("Received unexpected msg type. Expected %d, received %d",
2251                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
2252        return -1;
2253    }
2254
2255    if (msg.hdr.size != sizeof(msg.payload.session)) {
2256        error_report("Received bad msg size.");
2257        return -1;
2258    }
2259
2260    if (msg.payload.session.session_id < 0) {
2261        error_report("Bad session id: %" PRId64 "",
2262                              msg.payload.session.session_id);
2263        return -1;
2264    }
2265    *session_id = msg.payload.session.session_id;
2266
2267    return 0;
2268}
2269
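/* Close a crypto session previously created on the backend. */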
2270static int
2271vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
2272{
2273    bool crypto_session = virtio_has_feature(dev->protocol_features,
2274                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2275    VhostUserMsg msg = {
2276        .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
2277        .hdr.flags = VHOST_USER_VERSION,
2278        .hdr.size = sizeof(msg.payload.u64),
2279    };
2280    msg.payload.u64 = session_id;
2281
2282    if (!crypto_session) {
2283        error_report("vhost-user trying to send unhandled ioctl");
2284        return -1;
2285    }
2286
2287    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2288        error_report("vhost_user_write() returned -1, close session failed");
2289        return -1;
2290    }
2291
2292    return 0;
2293}
2294
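/*
 * Only fd-backed sections (e.g. memory-backend-file or -memfd) can be
 * mapped by the external vhost-user process.
 */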
2295static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
2296                                          MemoryRegionSection *section)
2297{
2298    bool result;
2299
2300    result = memory_region_get_fd(section->mr) >= 0;
2301
2302    return result;
2303}
2304
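/*
 * Fetch and mmap the backend's inflight shared-memory region, used to
 * track in-flight requests across a backend restart. A no-op without
 * VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD.
 */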
2305static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
2306                                      uint16_t queue_size,
2307                                      struct vhost_inflight *inflight)
2308{
2309    void *addr;
2310    int fd;
2311    struct vhost_user *u = dev->opaque;
2312    CharBackend *chr = u->user->chr;
2313    VhostUserMsg msg = {
2314        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
2315        .hdr.flags = VHOST_USER_VERSION,
2316        .payload.inflight.num_queues = dev->nvqs,
2317        .payload.inflight.queue_size = queue_size,
2318        .hdr.size = sizeof(msg.payload.inflight),
2319    };
2320
2321    if (!virtio_has_feature(dev->protocol_features,
2322                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2323        return 0;
2324    }
2325
2326    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2327        return -1;
2328    }
2329
2330    if (vhost_user_read(dev, &msg) < 0) {
2331        return -1;
2332    }
2333
2334    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
2335        error_report("Received unexpected msg type. "
2336                     "Expected %d, received %d",
2337                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
2338        return -1;
2339    }
2340
2341    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
2342        error_report("Received bad msg size.");
2343        return -1;
2344    }
2345
2346    if (!msg.payload.inflight.mmap_size) {
2347        return 0;
2348    }
2349
2350    fd = qemu_chr_fe_get_msgfd(chr);
2351    if (fd < 0) {
2352        error_report("Failed to get mem fd");
2353        return -1;
2354    }
2355
2356    addr = mmap(NULL, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
2357                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);
2358
2359    if (addr == MAP_FAILED) {
2360        error_report("Failed to mmap mem fd");
2361        close(fd);
2362        return -1;
2363    }
2364
2365    inflight->addr = addr;
2366    inflight->fd = fd;
2367    inflight->size = msg.payload.inflight.mmap_size;
2368    inflight->offset = msg.payload.inflight.mmap_offset;
2369    inflight->queue_size = queue_size;
2370
2371    return 0;
2372}
2373
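/* Hand a previously obtained inflight region back to the backend. */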
2374static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
2375                                      struct vhost_inflight *inflight)
2376{
2377    VhostUserMsg msg = {
2378        .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
2379        .hdr.flags = VHOST_USER_VERSION,
2380        .payload.inflight.mmap_size = inflight->size,
2381        .payload.inflight.mmap_offset = inflight->offset,
2382        .payload.inflight.num_queues = dev->nvqs,
2383        .payload.inflight.queue_size = inflight->queue_size,
2384        .hdr.size = sizeof(msg.payload.inflight),
2385    };
2386
2387    if (!virtio_has_feature(dev->protocol_features,
2388                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2389        return 0;
2390    }
2391
2392    if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) {
2393        return -1;
2394    }
2395
2396    return 0;
2397}
2398
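/*
 * Bind a VhostUserState to its chardev; fails if the state is already in
 * use. Usage sketch: a device frontend calls vhost_user_init() once per
 * backend connection and then passes the state as the 'opaque' argument
 * of vhost_dev_init(); vhost_user_cleanup() must run before freeing it.
 */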
2399bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
2400{
2401    if (user->chr) {
2402        error_setg(errp, "Cannot initialize vhost-user state");
2403        return false;
2404    }
2405    user->chr = chr;
2406    user->memory_slots = 0;
2407    return true;
2408}
2409
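/*
 * Undo vhost_user_init(): unmap any remaining host-notifier regions and
 * drop the chardev binding. Safe to call if init never completed.
 */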
2410void vhost_user_cleanup(VhostUserState *user)
2411{
2412    int i;
2413
2414    if (!user->chr) {
2415        return;
2416    }
2417
2418    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2419        if (user->notifier[i].addr) {
2420            object_unparent(OBJECT(&user->notifier[i].mr));
2421            munmap(user->notifier[i].addr, qemu_real_host_page_size);
2422            user->notifier[i].addr = NULL;
2423        }
2424    }
2425    user->chr = NULL;
2426}
2427
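/* The vhost ops dispatch table registered for VHOST_BACKEND_TYPE_USER. */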
2428const VhostOps user_ops = {
2429        .backend_type = VHOST_BACKEND_TYPE_USER,
2430        .vhost_backend_init = vhost_user_backend_init,
2431        .vhost_backend_cleanup = vhost_user_backend_cleanup,
2432        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
2433        .vhost_set_log_base = vhost_user_set_log_base,
2434        .vhost_set_mem_table = vhost_user_set_mem_table,
2435        .vhost_set_vring_addr = vhost_user_set_vring_addr,
2436        .vhost_set_vring_endian = vhost_user_set_vring_endian,
2437        .vhost_set_vring_num = vhost_user_set_vring_num,
2438        .vhost_set_vring_base = vhost_user_set_vring_base,
2439        .vhost_get_vring_base = vhost_user_get_vring_base,
2440        .vhost_set_vring_kick = vhost_user_set_vring_kick,
2441        .vhost_set_vring_call = vhost_user_set_vring_call,
2442        .vhost_set_features = vhost_user_set_features,
2443        .vhost_get_features = vhost_user_get_features,
2444        .vhost_set_owner = vhost_user_set_owner,
2445        .vhost_reset_device = vhost_user_reset_device,
2446        .vhost_get_vq_index = vhost_user_get_vq_index,
2447        .vhost_set_vring_enable = vhost_user_set_vring_enable,
2448        .vhost_requires_shm_log = vhost_user_requires_shm_log,
2449        .vhost_migration_done = vhost_user_migration_done,
2450        .vhost_backend_can_merge = vhost_user_can_merge,
2451        .vhost_net_set_mtu = vhost_user_net_set_mtu,
2452        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
2453        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
2454        .vhost_get_config = vhost_user_get_config,
2455        .vhost_set_config = vhost_user_set_config,
2456        .vhost_crypto_create_session = vhost_user_crypto_create_session,
2457        .vhost_crypto_close_session = vhost_user_crypto_close_session,
2458        .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
2459        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
2460        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
2461};
2462