qemu/hw/virtio/vhost-user.c
   1/*
   2 * vhost-user
   3 *
   4 * Copyright (c) 2013 Virtual Open Systems Sarl.
   5 *
   6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
   7 * See the COPYING file in the top-level directory.
   8 *
   9 */
  10
  11#include "qemu/osdep.h"
  12#include "qapi/error.h"
  13#include "hw/virtio/vhost.h"
  14#include "hw/virtio/vhost-user.h"
  15#include "hw/virtio/vhost-backend.h"
  16#include "hw/virtio/virtio.h"
  17#include "hw/virtio/virtio-net.h"
  18#include "chardev/char-fe.h"
  19#include "sysemu/kvm.h"
  20#include "qemu/error-report.h"
  21#include "qemu/sockets.h"
  22#include "sysemu/cryptodev.h"
  23#include "migration/migration.h"
  24#include "migration/postcopy-ram.h"
  25#include "trace.h"
  26
  27#include <sys/ioctl.h>
  28#include <sys/socket.h>
  29#include <sys/un.h>
  30
  31#include "standard-headers/linux/vhost_types.h"
  32
  33#ifdef CONFIG_LINUX
  34#include <linux/userfaultfd.h>
  35#endif
  36
  37#define VHOST_MEMORY_MAX_NREGIONS    8
  38#define VHOST_USER_F_PROTOCOL_FEATURES 30
  39#define VHOST_USER_SLAVE_MAX_FDS     8
  40
  41/*
  42 * Maximum size of virtio device config space
  43 */
  44#define VHOST_USER_MAX_CONFIG_SIZE 256
  45
  46enum VhostUserProtocolFeature {
  47    VHOST_USER_PROTOCOL_F_MQ = 0,
  48    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
  49    VHOST_USER_PROTOCOL_F_RARP = 2,
  50    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
  51    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
  52    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
  53    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
  54    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
  55    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
  56    VHOST_USER_PROTOCOL_F_CONFIG = 9,
  57    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
  58    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
  59    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
  60    VHOST_USER_PROTOCOL_F_MAX
  61};
  62
  63#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
  64
  65typedef enum VhostUserRequest {
  66    VHOST_USER_NONE = 0,
  67    VHOST_USER_GET_FEATURES = 1,
  68    VHOST_USER_SET_FEATURES = 2,
  69    VHOST_USER_SET_OWNER = 3,
  70    VHOST_USER_RESET_OWNER = 4,
  71    VHOST_USER_SET_MEM_TABLE = 5,
  72    VHOST_USER_SET_LOG_BASE = 6,
  73    VHOST_USER_SET_LOG_FD = 7,
  74    VHOST_USER_SET_VRING_NUM = 8,
  75    VHOST_USER_SET_VRING_ADDR = 9,
  76    VHOST_USER_SET_VRING_BASE = 10,
  77    VHOST_USER_GET_VRING_BASE = 11,
  78    VHOST_USER_SET_VRING_KICK = 12,
  79    VHOST_USER_SET_VRING_CALL = 13,
  80    VHOST_USER_SET_VRING_ERR = 14,
  81    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
  82    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
  83    VHOST_USER_GET_QUEUE_NUM = 17,
  84    VHOST_USER_SET_VRING_ENABLE = 18,
  85    VHOST_USER_SEND_RARP = 19,
  86    VHOST_USER_NET_SET_MTU = 20,
  87    VHOST_USER_SET_SLAVE_REQ_FD = 21,
  88    VHOST_USER_IOTLB_MSG = 22,
  89    VHOST_USER_SET_VRING_ENDIAN = 23,
  90    VHOST_USER_GET_CONFIG = 24,
  91    VHOST_USER_SET_CONFIG = 25,
  92    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
  93    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
  94    VHOST_USER_POSTCOPY_ADVISE  = 28,
  95    VHOST_USER_POSTCOPY_LISTEN  = 29,
  96    VHOST_USER_POSTCOPY_END     = 30,
  97    VHOST_USER_GET_INFLIGHT_FD = 31,
  98    VHOST_USER_SET_INFLIGHT_FD = 32,
  99    VHOST_USER_GPU_SET_SOCKET = 33,
 100    VHOST_USER_MAX
 101} VhostUserRequest;
 102
 103typedef enum VhostUserSlaveRequest {
 104    VHOST_USER_SLAVE_NONE = 0,
 105    VHOST_USER_SLAVE_IOTLB_MSG = 1,
 106    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
 107    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
 108    VHOST_USER_SLAVE_MAX
 109}  VhostUserSlaveRequest;
 110
 111typedef struct VhostUserMemoryRegion {
 112    uint64_t guest_phys_addr;
 113    uint64_t memory_size;
 114    uint64_t userspace_addr;
 115    uint64_t mmap_offset;
 116} VhostUserMemoryRegion;
 117
 118typedef struct VhostUserMemory {
 119    uint32_t nregions;
 120    uint32_t padding;
 121    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
 122} VhostUserMemory;
 123
 124typedef struct VhostUserLog {
 125    uint64_t mmap_size;
 126    uint64_t mmap_offset;
 127} VhostUserLog;
 128
 129typedef struct VhostUserConfig {
 130    uint32_t offset;
 131    uint32_t size;
 132    uint32_t flags;
 133    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
 134} VhostUserConfig;
 135
 136#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN    512
 137#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN  64
 138
 139typedef struct VhostUserCryptoSession {
 140    /* session id for success, -1 on errors */
 141    int64_t session_id;
 142    CryptoDevBackendSymSessionInfo session_setup_data;
 143    uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
 144    uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
 145} VhostUserCryptoSession;
 146
 147static VhostUserConfig c __attribute__ ((unused));
 148#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
 149                                   + sizeof(c.size) \
 150                                   + sizeof(c.flags))
 151
 152typedef struct VhostUserVringArea {
 153    uint64_t u64;
 154    uint64_t size;
 155    uint64_t offset;
 156} VhostUserVringArea;
 157
 158typedef struct VhostUserInflight {
 159    uint64_t mmap_size;
 160    uint64_t mmap_offset;
 161    uint16_t num_queues;
 162    uint16_t queue_size;
 163} VhostUserInflight;
 164
 165typedef struct {
 166    VhostUserRequest request;
 167
 168#define VHOST_USER_VERSION_MASK     (0x3)
 169#define VHOST_USER_REPLY_MASK       (0x1<<2)
 170#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
 171    uint32_t flags;
 172    uint32_t size; /* the following payload size */
 173} QEMU_PACKED VhostUserHeader;
 174
 175typedef union {
 176#define VHOST_USER_VRING_IDX_MASK   (0xff)
 177#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
 178        uint64_t u64;
 179        struct vhost_vring_state state;
 180        struct vhost_vring_addr addr;
 181        VhostUserMemory memory;
 182        VhostUserLog log;
 183        struct vhost_iotlb_msg iotlb;
 184        VhostUserConfig config;
 185        VhostUserCryptoSession session;
 186        VhostUserVringArea area;
 187        VhostUserInflight inflight;
 188} VhostUserPayload;
 189
 190typedef struct VhostUserMsg {
 191    VhostUserHeader hdr;
 192    VhostUserPayload payload;
 193} QEMU_PACKED VhostUserMsg;
 194
 195static VhostUserMsg m __attribute__ ((unused));
 196#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))
 197
 198#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))
 199
 200/* The version of the protocol we support */
 201#define VHOST_USER_VERSION    (0x1)
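
/*
 * On the wire every message is the 12-byte header immediately followed by
 * hdr.size bytes of payload.  For example, a VHOST_USER_GET_FEATURES
 * request is just { request = 1, flags = VHOST_USER_VERSION, size = 0 },
 * and the reply carries an 8-byte u64 payload with the feature bits.
 */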
 202
 203struct vhost_user {
 204    struct vhost_dev *dev;
 205    /* Shared between vhost devs of the same virtio device */
 206    VhostUserState *user;
 207    int slave_fd;
 208    NotifierWithReturn postcopy_notifier;
 209    struct PostCopyFD  postcopy_fd;
 210    uint64_t           postcopy_client_bases[VHOST_MEMORY_MAX_NREGIONS];
 211    /* Length of the region_rb and region_rb_offset arrays */
 212    size_t             region_rb_len;
 213    /* RAMBlock associated with a given region */
 214    RAMBlock         **region_rb;
 215    /* The offset from the start of the RAMBlock to the start of the
 216     * vhost region.
 217     */
 218    ram_addr_t        *region_rb_offset;
 219
 220    /* True once we've entered postcopy_listen */
 221    bool               postcopy_listen;
 222};
 223
 224static bool ioeventfd_enabled(void)
 225{
 226    return !kvm_enabled() || kvm_eventfds_enabled();
 227}
 228
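/*
 * Read the fixed-size message header from the backend chardev and check
 * that it carries the expected version and reply flag.
 */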
 229static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
 230{
 231    struct vhost_user *u = dev->opaque;
 232    CharBackend *chr = u->user->chr;
 233    uint8_t *p = (uint8_t *) msg;
 234    int r, size = VHOST_USER_HDR_SIZE;
 235
 236    r = qemu_chr_fe_read_all(chr, p, size);
 237    if (r != size) {
 238        error_report("Failed to read msg header. Read %d instead of %d."
 239                     " Original request %d.", r, size, msg->hdr.request);
 240        return -1;
 241    }
 242
 243    /* validate received flags */
 244    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
 245        error_report("Failed to read msg header."
 246                " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
 247                VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
 248        return -1;
 249    }
 250
 251    return 0;
 252}
 253
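/*
 * Read a full reply (header plus payload) from the backend, rejecting
 * payloads larger than VHOST_USER_PAYLOAD_SIZE.
 */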
 254static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
 255{
 256    struct vhost_user *u = dev->opaque;
 257    CharBackend *chr = u->user->chr;
 258    uint8_t *p = (uint8_t *) msg;
 259    int r, size;
 260
 261    if (vhost_user_read_header(dev, msg) < 0) {
 262        return -1;
 263    }
 264
 265    /* validate message size is sane */
 266    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
 267        error_report("Failed to read msg header."
 268                " Size %d exceeds the maximum %zu.", msg->hdr.size,
 269                VHOST_USER_PAYLOAD_SIZE);
 270        return -1;
 271    }
 272
 273    if (msg->hdr.size) {
 274        p += VHOST_USER_HDR_SIZE;
 275        size = msg->hdr.size;
 276        r = qemu_chr_fe_read_all(chr, p, size);
 277        if (r != size) {
 278            error_report("Failed to read msg payload."
 279                         " Read %d instead of %d.", r, msg->hdr.size);
 280            return -1;
 281        }
 282    }
 283
 284    return 0;
 285}
 286
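/*
 * If the request was sent with VHOST_USER_NEED_REPLY_MASK set, wait for
 * the backend's ack and map its u64 payload to 0 (success) or -1 (error).
 */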
 287static int process_message_reply(struct vhost_dev *dev,
 288                                 const VhostUserMsg *msg)
 289{
 290    VhostUserMsg msg_reply;
 291
 292    if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
 293        return 0;
 294    }
 295
 296    if (vhost_user_read(dev, &msg_reply) < 0) {
 297        return -1;
 298    }
 299
 300    if (msg_reply.hdr.request != msg->hdr.request) {
  301        error_report("Received unexpected msg type. "
 302                     "Expected %d received %d",
 303                     msg->hdr.request, msg_reply.hdr.request);
 304        return -1;
 305    }
 306
 307    return msg_reply.payload.u64 ? -1 : 0;
 308}
 309
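/*
 * Requests that apply to the whole device rather than to a single vring;
 * vhost_user_write() sends them only for the first queue (vq_index == 0).
 */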
 310static bool vhost_user_one_time_request(VhostUserRequest request)
 311{
 312    switch (request) {
 313    case VHOST_USER_SET_OWNER:
 314    case VHOST_USER_RESET_OWNER:
 315    case VHOST_USER_SET_MEM_TABLE:
 316    case VHOST_USER_GET_QUEUE_NUM:
 317    case VHOST_USER_NET_SET_MTU:
 318        return true;
 319    default:
 320        return false;
 321    }
 322}
 323
 324/* most non-init callers ignore the error */
 325static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
 326                            int *fds, int fd_num)
 327{
 328    struct vhost_user *u = dev->opaque;
 329    CharBackend *chr = u->user->chr;
 330    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;
 331
 332    /*
  333     * Requests that are not vring specific, like VHOST_USER_SET_MEM_TABLE,
  334     * only need to be sent once; later instances of such a request are
  335     * simply ignored.
 336     */
 337    if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
 338        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
 339        return 0;
 340    }
 341
 342    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
 343        error_report("Failed to set msg fds.");
 344        return -1;
 345    }
 346
 347    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
 348    if (ret != size) {
 349        error_report("Failed to write msg."
 350                     " Wrote %d instead of %d.", ret, size);
 351        return -1;
 352    }
 353
 354    return 0;
 355}
 356
 357int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
 358{
 359    VhostUserMsg msg = {
 360        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
 361        .hdr.flags = VHOST_USER_VERSION,
 362    };
 363
 364    return vhost_user_write(dev, &msg, &fd, 1);
 365}
 366
 367static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
 368                                   struct vhost_log *log)
 369{
 370    int fds[VHOST_MEMORY_MAX_NREGIONS];
 371    size_t fd_num = 0;
 372    bool shmfd = virtio_has_feature(dev->protocol_features,
 373                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
 374    VhostUserMsg msg = {
 375        .hdr.request = VHOST_USER_SET_LOG_BASE,
 376        .hdr.flags = VHOST_USER_VERSION,
 377        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
 378        .payload.log.mmap_offset = 0,
 379        .hdr.size = sizeof(msg.payload.log),
 380    };
 381
 382    if (shmfd && log->fd != -1) {
 383        fds[fd_num++] = log->fd;
 384    }
 385
 386    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
 387        return -1;
 388    }
 389
 390    if (shmfd) {
 391        msg.hdr.size = 0;
 392        if (vhost_user_read(dev, &msg) < 0) {
 393            return -1;
 394        }
 395
 396        if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
 397            error_report("Received unexpected msg type. "
 398                         "Expected %d received %d",
 399                         VHOST_USER_SET_LOG_BASE, msg.hdr.request);
 400            return -1;
 401        }
 402    }
 403
 404    return 0;
 405}
 406
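/*
 * Postcopy variant of SET_MEM_TABLE: besides passing the region fds, it
 * records each region's RAMBlock and waits for the backend's reply so the
 * addresses at which the backend mapped the regions can later be used to
 * service page faults.
 */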
 407static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
 408                                             struct vhost_memory *mem)
 409{
 410    struct vhost_user *u = dev->opaque;
 411    int fds[VHOST_MEMORY_MAX_NREGIONS];
 412    int i, fd;
 413    size_t fd_num = 0;
 414    VhostUserMsg msg_reply;
 415    int region_i, msg_i;
 416
 417    VhostUserMsg msg = {
 418        .hdr.request = VHOST_USER_SET_MEM_TABLE,
 419        .hdr.flags = VHOST_USER_VERSION,
 420    };
 421
 422    if (u->region_rb_len < dev->mem->nregions) {
 423        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
 424        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
 425                                      dev->mem->nregions);
 426        memset(&(u->region_rb[u->region_rb_len]), '\0',
 427               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
 428        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
 429               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
 430        u->region_rb_len = dev->mem->nregions;
 431    }
 432
 433    for (i = 0; i < dev->mem->nregions; ++i) {
 434        struct vhost_memory_region *reg = dev->mem->regions + i;
 435        ram_addr_t offset;
 436        MemoryRegion *mr;
 437
 438        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
 439        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
 440                                     &offset);
 441        fd = memory_region_get_fd(mr);
 442        if (fd > 0) {
 443            trace_vhost_user_set_mem_table_withfd(fd_num, mr->name,
 444                                                  reg->memory_size,
 445                                                  reg->guest_phys_addr,
 446                                                  reg->userspace_addr, offset);
 447            u->region_rb_offset[i] = offset;
 448            u->region_rb[i] = mr->ram_block;
 449            msg.payload.memory.regions[fd_num].userspace_addr =
 450                reg->userspace_addr;
 451            msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
 452            msg.payload.memory.regions[fd_num].guest_phys_addr =
 453                reg->guest_phys_addr;
 454            msg.payload.memory.regions[fd_num].mmap_offset = offset;
 455            assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
 456            fds[fd_num++] = fd;
 457        } else {
 458            u->region_rb_offset[i] = 0;
 459            u->region_rb[i] = NULL;
 460        }
 461    }
 462
 463    msg.payload.memory.nregions = fd_num;
 464
 465    if (!fd_num) {
 466        error_report("Failed initializing vhost-user memory map, "
 467                     "consider using -object memory-backend-file share=on");
 468        return -1;
 469    }
 470
 471    msg.hdr.size = sizeof(msg.payload.memory.nregions);
 472    msg.hdr.size += sizeof(msg.payload.memory.padding);
 473    msg.hdr.size += fd_num * sizeof(VhostUserMemoryRegion);
 474
 475    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
 476        return -1;
 477    }
 478
 479    if (vhost_user_read(dev, &msg_reply) < 0) {
 480        return -1;
 481    }
 482
 483    if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
  484        error_report("%s: Received unexpected msg type. "
 485                     "Expected %d received %d", __func__,
 486                     VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
 487        return -1;
 488    }
 489    /* We're using the same structure, just reusing one of the
 490     * fields, so it should be the same size.
 491     */
 492    if (msg_reply.hdr.size != msg.hdr.size) {
 493        error_report("%s: Unexpected size for postcopy reply "
 494                     "%d vs %d", __func__, msg_reply.hdr.size, msg.hdr.size);
 495        return -1;
 496    }
 497
 498    memset(u->postcopy_client_bases, 0,
 499           sizeof(uint64_t) * VHOST_MEMORY_MAX_NREGIONS);
 500
  501    /* The replies are in the same order as the regions that were sent,
  502     * but some of the regions were skipped (above) because they
  503     * didn't have fds.
  504     */
 505    for (msg_i = 0, region_i = 0;
 506         region_i < dev->mem->nregions;
 507        region_i++) {
 508        if (msg_i < fd_num &&
 509            msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
 510            dev->mem->regions[region_i].guest_phys_addr) {
 511            u->postcopy_client_bases[region_i] =
 512                msg_reply.payload.memory.regions[msg_i].userspace_addr;
 513            trace_vhost_user_set_mem_table_postcopy(
 514                msg_reply.payload.memory.regions[msg_i].userspace_addr,
 515                msg.payload.memory.regions[msg_i].userspace_addr,
 516                msg_i, region_i);
 517            msg_i++;
 518        }
 519    }
 520    if (msg_i != fd_num) {
 521        error_report("%s: postcopy reply not fully consumed "
 522                     "%d vs %zd",
 523                     __func__, msg_i, fd_num);
 524        return -1;
 525    }
  526    /* Now that the regions are registered with the postcopy code, ack to the
  527     * client: from this point on we are able to deal with any faults it
  528     * generates.
  529     */
 530    /* TODO: Use this for failure cases as well with a bad value */
 531    msg.hdr.size = sizeof(msg.payload.u64);
 532    msg.payload.u64 = 0; /* OK */
 533    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
 534        return -1;
 535    }
 536
 537    return 0;
 538}
 539
 540static int vhost_user_set_mem_table(struct vhost_dev *dev,
 541                                    struct vhost_memory *mem)
 542{
 543    struct vhost_user *u = dev->opaque;
 544    int fds[VHOST_MEMORY_MAX_NREGIONS];
 545    int i, fd;
 546    size_t fd_num = 0;
 547    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
 548    bool reply_supported = virtio_has_feature(dev->protocol_features,
 549                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
 550
 551    if (do_postcopy) {
  552        /* Postcopy has enough differences that it's best done in its own
  553         * version.
  554         */
 555        return vhost_user_set_mem_table_postcopy(dev, mem);
 556    }
 557
 558    VhostUserMsg msg = {
 559        .hdr.request = VHOST_USER_SET_MEM_TABLE,
 560        .hdr.flags = VHOST_USER_VERSION,
 561    };
 562
 563    if (reply_supported) {
 564        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
 565    }
 566
 567    for (i = 0; i < dev->mem->nregions; ++i) {
 568        struct vhost_memory_region *reg = dev->mem->regions + i;
 569        ram_addr_t offset;
 570        MemoryRegion *mr;
 571
 572        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
 573        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
 574                                     &offset);
 575        fd = memory_region_get_fd(mr);
 576        if (fd > 0) {
 577            if (fd_num == VHOST_MEMORY_MAX_NREGIONS) {
 578                error_report("Failed preparing vhost-user memory table msg");
 579                return -1;
 580            }
 581            msg.payload.memory.regions[fd_num].userspace_addr =
 582                reg->userspace_addr;
 583            msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
 584            msg.payload.memory.regions[fd_num].guest_phys_addr =
 585                reg->guest_phys_addr;
 586            msg.payload.memory.regions[fd_num].mmap_offset = offset;
 587            fds[fd_num++] = fd;
 588        }
 589    }
 590
 591    msg.payload.memory.nregions = fd_num;
 592
 593    if (!fd_num) {
 594        error_report("Failed initializing vhost-user memory map, "
 595                     "consider using -object memory-backend-file share=on");
 596        return -1;
 597    }
 598
 599    msg.hdr.size = sizeof(msg.payload.memory.nregions);
 600    msg.hdr.size += sizeof(msg.payload.memory.padding);
 601    msg.hdr.size += fd_num * sizeof(VhostUserMemoryRegion);
 602
 603    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
 604        return -1;
 605    }
 606
 607    if (reply_supported) {
 608        return process_message_reply(dev, &msg);
 609    }
 610
 611    return 0;
 612}
 613
 614static int vhost_user_set_vring_addr(struct vhost_dev *dev,
 615                                     struct vhost_vring_addr *addr)
 616{
 617    VhostUserMsg msg = {
 618        .hdr.request = VHOST_USER_SET_VRING_ADDR,
 619        .hdr.flags = VHOST_USER_VERSION,
 620        .payload.addr = *addr,
 621        .hdr.size = sizeof(msg.payload.addr),
 622    };
 623
 624    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
 625        return -1;
 626    }
 627
 628    return 0;
 629}
 630
 631static int vhost_user_set_vring_endian(struct vhost_dev *dev,
 632                                       struct vhost_vring_state *ring)
 633{
 634    bool cross_endian = virtio_has_feature(dev->protocol_features,
 635                                           VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
 636    VhostUserMsg msg = {
 637        .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
 638        .hdr.flags = VHOST_USER_VERSION,
 639        .payload.state = *ring,
 640        .hdr.size = sizeof(msg.payload.state),
 641    };
 642
 643    if (!cross_endian) {
 644        error_report("vhost-user trying to send unhandled ioctl");
 645        return -1;
 646    }
 647
 648    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
 649        return -1;
 650    }
 651
 652    return 0;
 653}
 654
 655static int vhost_set_vring(struct vhost_dev *dev,
 656                           unsigned long int request,
 657                           struct vhost_vring_state *ring)
 658{
 659    VhostUserMsg msg = {
 660        .hdr.request = request,
 661        .hdr.flags = VHOST_USER_VERSION,
 662        .payload.state = *ring,
 663        .hdr.size = sizeof(msg.payload.state),
 664    };
 665
 666    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
 667        return -1;
 668    }
 669
 670    return 0;
 671}
 672
 673static int vhost_user_set_vring_num(struct vhost_dev *dev,
 674                                    struct vhost_vring_state *ring)
 675{
 676    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
 677}
 678
 679static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
 680                                             int queue_idx)
 681{
 682    struct vhost_user *u = dev->opaque;
 683    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
 684    VirtIODevice *vdev = dev->vdev;
 685
 686    if (n->addr && !n->set) {
 687        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
 688        n->set = true;
 689    }
 690}
 691
 692static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
 693                                            int queue_idx)
 694{
 695    struct vhost_user *u = dev->opaque;
 696    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
 697    VirtIODevice *vdev = dev->vdev;
 698
 699    if (n->addr && n->set) {
 700        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
 701        n->set = false;
 702    }
 703}
 704
 705static int vhost_user_set_vring_base(struct vhost_dev *dev,
 706                                     struct vhost_vring_state *ring)
 707{
 708    vhost_user_host_notifier_restore(dev, ring->index);
 709
 710    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
 711}
 712
 713static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
 714{
 715    int i;
 716
 717    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
 718        return -1;
 719    }
 720
 721    for (i = 0; i < dev->nvqs; ++i) {
 722        struct vhost_vring_state state = {
 723            .index = dev->vq_index + i,
 724            .num   = enable,
 725        };
 726
 727        vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
 728    }
 729
 730    return 0;
 731}
 732
 733static int vhost_user_get_vring_base(struct vhost_dev *dev,
 734                                     struct vhost_vring_state *ring)
 735{
 736    VhostUserMsg msg = {
 737        .hdr.request = VHOST_USER_GET_VRING_BASE,
 738        .hdr.flags = VHOST_USER_VERSION,
 739        .payload.state = *ring,
 740        .hdr.size = sizeof(msg.payload.state),
 741    };
 742
 743    vhost_user_host_notifier_remove(dev, ring->index);
 744
 745    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
 746        return -1;
 747    }
 748
 749    if (vhost_user_read(dev, &msg) < 0) {
 750        return -1;
 751    }
 752
 753    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
 754        error_report("Received unexpected msg type. Expected %d received %d",
 755                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
 756        return -1;
 757    }
 758
 759    if (msg.hdr.size != sizeof(msg.payload.state)) {
 760        error_report("Received bad msg size.");
 761        return -1;
 762    }
 763
 764    *ring = msg.payload.state;
 765
 766    return 0;
 767}
 768
 769static int vhost_set_vring_file(struct vhost_dev *dev,
 770                                VhostUserRequest request,
 771                                struct vhost_vring_file *file)
 772{
 773    int fds[VHOST_MEMORY_MAX_NREGIONS];
 774    size_t fd_num = 0;
 775    VhostUserMsg msg = {
 776        .hdr.request = request,
 777        .hdr.flags = VHOST_USER_VERSION,
 778        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
 779        .hdr.size = sizeof(msg.payload.u64),
 780    };
 781
 782    if (ioeventfd_enabled() && file->fd > 0) {
 783        fds[fd_num++] = file->fd;
 784    } else {
 785        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
 786    }
 787
 788    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
 789        return -1;
 790    }
 791
 792    return 0;
 793}
 794
 795static int vhost_user_set_vring_kick(struct vhost_dev *dev,
 796                                     struct vhost_vring_file *file)
 797{
 798    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
 799}
 800
 801static int vhost_user_set_vring_call(struct vhost_dev *dev,
 802                                     struct vhost_vring_file *file)
 803{
 804    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
 805}
 806
 807static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
 808{
 809    VhostUserMsg msg = {
 810        .hdr.request = request,
 811        .hdr.flags = VHOST_USER_VERSION,
 812        .payload.u64 = u64,
 813        .hdr.size = sizeof(msg.payload.u64),
 814    };
 815
 816    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
 817        return -1;
 818    }
 819
 820    return 0;
 821}
 822
 823static int vhost_user_set_features(struct vhost_dev *dev,
 824                                   uint64_t features)
 825{
 826    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
 827}
 828
 829static int vhost_user_set_protocol_features(struct vhost_dev *dev,
 830                                            uint64_t features)
 831{
 832    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
 833}
 834
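/* Send a request and read a 64-bit payload back from the backend. */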
 835static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
 836{
 837    VhostUserMsg msg = {
 838        .hdr.request = request,
 839        .hdr.flags = VHOST_USER_VERSION,
 840    };
 841
 842    if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
 843        return 0;
 844    }
 845
 846    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
 847        return -1;
 848    }
 849
 850    if (vhost_user_read(dev, &msg) < 0) {
 851        return -1;
 852    }
 853
 854    if (msg.hdr.request != request) {
 855        error_report("Received unexpected msg type. Expected %d received %d",
 856                     request, msg.hdr.request);
 857        return -1;
 858    }
 859
 860    if (msg.hdr.size != sizeof(msg.payload.u64)) {
 861        error_report("Received bad msg size.");
 862        return -1;
 863    }
 864
 865    *u64 = msg.payload.u64;
 866
 867    return 0;
 868}
 869
 870static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
 871{
 872    return vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features);
 873}
 874
 875static int vhost_user_set_owner(struct vhost_dev *dev)
 876{
 877    VhostUserMsg msg = {
 878        .hdr.request = VHOST_USER_SET_OWNER,
 879        .hdr.flags = VHOST_USER_VERSION,
 880    };
 881
 882    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
 883        return -1;
 884    }
 885
 886    return 0;
 887}
 888
 889static int vhost_user_reset_device(struct vhost_dev *dev)
 890{
 891    VhostUserMsg msg = {
 892        .hdr.request = VHOST_USER_RESET_OWNER,
 893        .hdr.flags = VHOST_USER_VERSION,
 894    };
 895
 896    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
 897        return -1;
 898    }
 899
 900    return 0;
 901}
 902
 903static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
 904{
 905    int ret = -1;
 906
 907    if (!dev->config_ops) {
 908        return -1;
 909    }
 910
 911    if (dev->config_ops->vhost_dev_config_notifier) {
 912        ret = dev->config_ops->vhost_dev_config_notifier(dev);
 913    }
 914
 915    return ret;
 916}
 917
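/*
 * Map the page the backend passed with VRING_HOST_NOTIFIER and install it
 * as a host notifier memory region for the queue; a request with the NOFD
 * flag only removes any notifier that is currently set.
 */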
 918static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
 919                                                       VhostUserVringArea *area,
 920                                                       int fd)
 921{
 922    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
 923    size_t page_size = qemu_real_host_page_size;
 924    struct vhost_user *u = dev->opaque;
 925    VhostUserState *user = u->user;
 926    VirtIODevice *vdev = dev->vdev;
 927    VhostUserHostNotifier *n;
 928    void *addr;
 929    char *name;
 930
 931    if (!virtio_has_feature(dev->protocol_features,
 932                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
 933        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
 934        return -1;
 935    }
 936
 937    n = &user->notifier[queue_idx];
 938
 939    if (n->addr) {
 940        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
 941        object_unparent(OBJECT(&n->mr));
 942        munmap(n->addr, page_size);
 943        n->addr = NULL;
 944    }
 945
 946    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
 947        return 0;
 948    }
 949
 950    /* Sanity check. */
 951    if (area->size != page_size) {
 952        return -1;
 953    }
 954
 955    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 956                fd, area->offset);
 957    if (addr == MAP_FAILED) {
 958        return -1;
 959    }
 960
 961    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
 962                           user, queue_idx);
 963    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
 964                                      page_size, addr);
 965    g_free(name);
 966
 967    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
 968        munmap(addr, page_size);
 969        return -1;
 970    }
 971
 972    n->addr = addr;
 973    n->set = true;
 974
 975    return 0;
 976}
 977
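/*
 * fd handler for the slave channel: receive one request together with any
 * SCM_RIGHTS file descriptors, dispatch it, close unused descriptors and,
 * if the slave asked for it, send back a REPLY_ACK with the result.
 */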
 978static void slave_read(void *opaque)
 979{
 980    struct vhost_dev *dev = opaque;
 981    struct vhost_user *u = dev->opaque;
 982    VhostUserHeader hdr = { 0, };
 983    VhostUserPayload payload = { 0, };
 984    int size, ret = 0;
 985    struct iovec iov;
 986    struct msghdr msgh;
 987    int fd[VHOST_USER_SLAVE_MAX_FDS];
 988    char control[CMSG_SPACE(sizeof(fd))];
 989    struct cmsghdr *cmsg;
 990    int i, fdsize = 0;
 991
 992    memset(&msgh, 0, sizeof(msgh));
 993    msgh.msg_iov = &iov;
 994    msgh.msg_iovlen = 1;
 995    msgh.msg_control = control;
 996    msgh.msg_controllen = sizeof(control);
 997
 998    memset(fd, -1, sizeof(fd));
 999
1000    /* Read header */
1001    iov.iov_base = &hdr;
1002    iov.iov_len = VHOST_USER_HDR_SIZE;
1003
1004    do {
1005        size = recvmsg(u->slave_fd, &msgh, 0);
1006    } while (size < 0 && (errno == EINTR || errno == EAGAIN));
1007
1008    if (size != VHOST_USER_HDR_SIZE) {
1009        error_report("Failed to read from slave.");
1010        goto err;
1011    }
1012
1013    if (msgh.msg_flags & MSG_CTRUNC) {
1014        error_report("Truncated message.");
1015        goto err;
1016    }
1017
1018    for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
1019         cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
1020            if (cmsg->cmsg_level == SOL_SOCKET &&
1021                cmsg->cmsg_type == SCM_RIGHTS) {
1022                    fdsize = cmsg->cmsg_len - CMSG_LEN(0);
1023                    memcpy(fd, CMSG_DATA(cmsg), fdsize);
1024                    break;
1025            }
1026    }
1027
1028    if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
1029        error_report("Failed to read msg header."
1030                " Size %d exceeds the maximum %zu.", hdr.size,
1031                VHOST_USER_PAYLOAD_SIZE);
1032        goto err;
1033    }
1034
1035    /* Read payload */
1036    do {
1037        size = read(u->slave_fd, &payload, hdr.size);
1038    } while (size < 0 && (errno == EINTR || errno == EAGAIN));
1039
1040    if (size != hdr.size) {
1041        error_report("Failed to read payload from slave.");
1042        goto err;
1043    }
1044
1045    switch (hdr.request) {
1046    case VHOST_USER_SLAVE_IOTLB_MSG:
1047        ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
1048        break;
1049    case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG :
1050        ret = vhost_user_slave_handle_config_change(dev);
1051        break;
1052    case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
1053        ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
1054                                                          fd[0]);
1055        break;
1056    default:
1057        error_report("Received unexpected msg type.");
1058        ret = -EINVAL;
1059    }
1060
1061    /* Close the remaining file descriptors. */
1062    for (i = 0; i < fdsize; i++) {
1063        if (fd[i] != -1) {
1064            close(fd[i]);
1065        }
1066    }
1067
1068    /*
 1069     * REPLY_ACK feature handling. Other reply types have to be managed
1070     * directly in their request handlers.
1071     */
1072    if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
1073        struct iovec iovec[2];
1074
1075
1076        hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
1077        hdr.flags |= VHOST_USER_REPLY_MASK;
1078
1079        payload.u64 = !!ret;
1080        hdr.size = sizeof(payload.u64);
1081
1082        iovec[0].iov_base = &hdr;
1083        iovec[0].iov_len = VHOST_USER_HDR_SIZE;
1084        iovec[1].iov_base = &payload;
1085        iovec[1].iov_len = hdr.size;
1086
1087        do {
1088            size = writev(u->slave_fd, iovec, ARRAY_SIZE(iovec));
1089        } while (size < 0 && (errno == EINTR || errno == EAGAIN));
1090
1091        if (size != VHOST_USER_HDR_SIZE + hdr.size) {
1092            error_report("Failed to send msg reply to slave.");
1093            goto err;
1094        }
1095    }
1096
1097    return;
1098
1099err:
1100    qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
1101    close(u->slave_fd);
1102    u->slave_fd = -1;
1103    for (i = 0; i < fdsize; i++) {
1104        if (fd[i] != -1) {
1105            close(fd[i]);
1106        }
1107    }
1108    return;
1109}
1110
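/*
 * Create the socketpair used for backend-initiated (slave) requests and
 * pass one end to the backend with SET_SLAVE_REQ_FD; the other end is
 * watched by slave_read().
 */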
1111static int vhost_setup_slave_channel(struct vhost_dev *dev)
1112{
1113    VhostUserMsg msg = {
1114        .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
1115        .hdr.flags = VHOST_USER_VERSION,
1116    };
1117    struct vhost_user *u = dev->opaque;
1118    int sv[2], ret = 0;
1119    bool reply_supported = virtio_has_feature(dev->protocol_features,
1120                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
1121
1122    if (!virtio_has_feature(dev->protocol_features,
1123                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
1124        return 0;
1125    }
1126
1127    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
1128        error_report("socketpair() failed");
1129        return -1;
1130    }
1131
1132    u->slave_fd = sv[0];
1133    qemu_set_fd_handler(u->slave_fd, slave_read, NULL, dev);
1134
1135    if (reply_supported) {
1136        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
1137    }
1138
1139    ret = vhost_user_write(dev, &msg, &sv[1], 1);
1140    if (ret) {
1141        goto out;
1142    }
1143
1144    if (reply_supported) {
1145        ret = process_message_reply(dev, &msg);
1146    }
1147
1148out:
1149    close(sv[1]);
1150    if (ret) {
1151        qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
1152        close(u->slave_fd);
1153        u->slave_fd = -1;
1154    }
1155
1156    return ret;
1157}
1158
1159#ifdef CONFIG_LINUX
1160/*
1161 * Called back from the postcopy fault thread when a fault is received on our
1162 * ufd.
1163 * TODO: This is Linux specific
1164 */
1165static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
1166                                             void *ufd)
1167{
1168    struct vhost_dev *dev = pcfd->data;
1169    struct vhost_user *u = dev->opaque;
1170    struct uffd_msg *msg = ufd;
1171    uint64_t faultaddr = msg->arg.pagefault.address;
1172    RAMBlock *rb = NULL;
1173    uint64_t rb_offset;
1174    int i;
1175
1176    trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
1177                                            dev->mem->nregions);
1178    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1179        trace_vhost_user_postcopy_fault_handler_loop(i,
1180                u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
1181        if (faultaddr >= u->postcopy_client_bases[i]) {
 1182            /* Offset of the fault address in the vhost region */
1183            uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
1184            if (region_offset < dev->mem->regions[i].memory_size) {
1185                rb_offset = region_offset + u->region_rb_offset[i];
1186                trace_vhost_user_postcopy_fault_handler_found(i,
1187                        region_offset, rb_offset);
1188                rb = u->region_rb[i];
1189                return postcopy_request_shared_page(pcfd, rb, faultaddr,
1190                                                    rb_offset);
1191            }
1192        }
1193    }
1194    error_report("%s: Failed to find region for fault %" PRIx64,
1195                 __func__, faultaddr);
1196    return -1;
1197}
1198
1199static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
1200                                     uint64_t offset)
1201{
1202    struct vhost_dev *dev = pcfd->data;
1203    struct vhost_user *u = dev->opaque;
1204    int i;
1205
1206    trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);
1207
1208    if (!u) {
1209        return 0;
1210    }
 1211    /* Translate the offset into an address in the client's address space */
1212    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1213        if (u->region_rb[i] == rb &&
1214            offset >= u->region_rb_offset[i] &&
1215            offset < (u->region_rb_offset[i] +
1216                      dev->mem->regions[i].memory_size)) {
1217            uint64_t client_addr = (offset - u->region_rb_offset[i]) +
1218                                   u->postcopy_client_bases[i];
1219            trace_vhost_user_postcopy_waker_found(client_addr);
1220            return postcopy_wake_shared(pcfd, client_addr, rb);
1221        }
1222    }
1223
1224    trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
1225    return 0;
1226}
1227#endif
1228
1229/*
1230 * Called at the start of an inbound postcopy on reception of the
1231 * 'advise' command.
1232 */
1233static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
1234{
1235#ifdef CONFIG_LINUX
1236    struct vhost_user *u = dev->opaque;
1237    CharBackend *chr = u->user->chr;
1238    int ufd;
1239    VhostUserMsg msg = {
1240        .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
1241        .hdr.flags = VHOST_USER_VERSION,
1242    };
1243
1244    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1245        error_setg(errp, "Failed to send postcopy_advise to vhost");
1246        return -1;
1247    }
1248
1249    if (vhost_user_read(dev, &msg) < 0) {
1250        error_setg(errp, "Failed to get postcopy_advise reply from vhost");
1251        return -1;
1252    }
1253
1254    if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
1255        error_setg(errp, "Unexpected msg type. Expected %d received %d",
1256                     VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
1257        return -1;
1258    }
1259
1260    if (msg.hdr.size) {
1261        error_setg(errp, "Received bad msg size.");
1262        return -1;
1263    }
1264    ufd = qemu_chr_fe_get_msgfd(chr);
1265    if (ufd < 0) {
1266        error_setg(errp, "%s: Failed to get ufd", __func__);
1267        return -1;
1268    }
1269    qemu_set_nonblock(ufd);
1270
1271    /* register ufd with userfault thread */
1272    u->postcopy_fd.fd = ufd;
1273    u->postcopy_fd.data = dev;
1274    u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
1275    u->postcopy_fd.waker = vhost_user_postcopy_waker;
1276    u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
1277    postcopy_register_shared_ufd(&u->postcopy_fd);
1278    return 0;
1279#else
1280    error_setg(errp, "Postcopy not supported on non-Linux systems");
1281    return -1;
1282#endif
1283}
1284
1285/*
1286 * Called at the switch to postcopy on reception of the 'listen' command.
1287 */
1288static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
1289{
1290    struct vhost_user *u = dev->opaque;
1291    int ret;
1292    VhostUserMsg msg = {
1293        .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
1294        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1295    };
1296    u->postcopy_listen = true;
1297    trace_vhost_user_postcopy_listen();
1298    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1299        error_setg(errp, "Failed to send postcopy_listen to vhost");
1300        return -1;
1301    }
1302
1303    ret = process_message_reply(dev, &msg);
1304    if (ret) {
1305        error_setg(errp, "Failed to receive reply to postcopy_listen");
1306        return ret;
1307    }
1308
1309    return 0;
1310}
1311
1312/*
1313 * Called at the end of postcopy
1314 */
1315static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
1316{
1317    VhostUserMsg msg = {
1318        .hdr.request = VHOST_USER_POSTCOPY_END,
1319        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1320    };
1321    int ret;
1322    struct vhost_user *u = dev->opaque;
1323
1324    trace_vhost_user_postcopy_end_entry();
1325    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1326        error_setg(errp, "Failed to send postcopy_end to vhost");
1327        return -1;
1328    }
1329
1330    ret = process_message_reply(dev, &msg);
1331    if (ret) {
1332        error_setg(errp, "Failed to receive reply to postcopy_end");
1333        return ret;
1334    }
1335    postcopy_unregister_shared_ufd(&u->postcopy_fd);
1336    close(u->postcopy_fd.fd);
1337    u->postcopy_fd.handler = NULL;
1338
1339    trace_vhost_user_postcopy_end_exit();
1340
1341    return 0;
1342}
1343
1344static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
1345                                        void *opaque)
1346{
1347    struct PostcopyNotifyData *pnd = opaque;
1348    struct vhost_user *u = container_of(notifier, struct vhost_user,
1349                                         postcopy_notifier);
1350    struct vhost_dev *dev = u->dev;
1351
1352    switch (pnd->reason) {
1353    case POSTCOPY_NOTIFY_PROBE:
1354        if (!virtio_has_feature(dev->protocol_features,
1355                                VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
1356            /* TODO: Get the device name into this error somehow */
1357            error_setg(pnd->errp,
1358                       "vhost-user backend not capable of postcopy");
1359            return -ENOENT;
1360        }
1361        break;
1362
1363    case POSTCOPY_NOTIFY_INBOUND_ADVISE:
1364        return vhost_user_postcopy_advise(dev, pnd->errp);
1365
1366    case POSTCOPY_NOTIFY_INBOUND_LISTEN:
1367        return vhost_user_postcopy_listen(dev, pnd->errp);
1368
1369    case POSTCOPY_NOTIFY_INBOUND_END:
1370        return vhost_user_postcopy_end(dev, pnd->errp);
1371
1372    default:
1373        /* We ignore notifications we don't know */
1374        break;
1375    }
1376
1377    return 0;
1378}
1379
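/*
 * Backend init: negotiate feature and protocol-feature bits, query the
 * queue limit, set up the slave channel and register the postcopy
 * notifier.
 */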
1380static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque)
1381{
1382    uint64_t features, protocol_features;
1383    struct vhost_user *u;
1384    int err;
1385
1386    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
1387
1388    u = g_new0(struct vhost_user, 1);
1389    u->user = opaque;
1390    u->slave_fd = -1;
1391    u->dev = dev;
1392    dev->opaque = u;
1393
1394    err = vhost_user_get_features(dev, &features);
1395    if (err < 0) {
1396        return err;
1397    }
1398
1399    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
1400        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
1401
1402        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
1403                                 &protocol_features);
1404        if (err < 0) {
1405            return err;
1406        }
1407
1408        dev->protocol_features =
1409            protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;
1410
1411        if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
1412            /* Don't acknowledge CONFIG feature if device doesn't support it */
1413            dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
1414        } else if (!(protocol_features &
1415                    (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) {
1416            error_report("Device expects VHOST_USER_PROTOCOL_F_CONFIG "
1417                    "but backend does not support it.");
1418            return -1;
1419        }
1420
1421        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
1422        if (err < 0) {
1423            return err;
1424        }
1425
 1426        /* Query the max number of queues if the backend supports MQ */
1427        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
1428            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
1429                                     &dev->max_queues);
1430            if (err < 0) {
1431                return err;
1432            }
1433        }
1434
1435        if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
1436                !(virtio_has_feature(dev->protocol_features,
1437                    VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
1438                 virtio_has_feature(dev->protocol_features,
1439                    VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
1440            error_report("IOMMU support requires reply-ack and "
1441                         "slave-req protocol features.");
1442            return -1;
1443        }
1444    }
1445
1446    if (dev->migration_blocker == NULL &&
1447        !virtio_has_feature(dev->protocol_features,
1448                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
1449        error_setg(&dev->migration_blocker,
1450                   "Migration disabled: vhost-user backend lacks "
1451                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
1452    }
1453
1454    err = vhost_setup_slave_channel(dev);
1455    if (err < 0) {
1456        return err;
1457    }
1458
1459    u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
1460    postcopy_add_notifier(&u->postcopy_notifier);
1461
1462    return 0;
1463}
1464
1465static int vhost_user_backend_cleanup(struct vhost_dev *dev)
1466{
1467    struct vhost_user *u;
1468
1469    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
1470
1471    u = dev->opaque;
1472    if (u->postcopy_notifier.notify) {
1473        postcopy_remove_notifier(&u->postcopy_notifier);
1474        u->postcopy_notifier.notify = NULL;
1475    }
1476    u->postcopy_listen = false;
1477    if (u->postcopy_fd.handler) {
1478        postcopy_unregister_shared_ufd(&u->postcopy_fd);
1479        close(u->postcopy_fd.fd);
1480        u->postcopy_fd.handler = NULL;
1481    }
1482    if (u->slave_fd >= 0) {
1483        qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
1484        close(u->slave_fd);
1485        u->slave_fd = -1;
1486    }
1487    g_free(u->region_rb);
1488    u->region_rb = NULL;
1489    g_free(u->region_rb_offset);
1490    u->region_rb_offset = NULL;
1491    u->region_rb_len = 0;
1492    g_free(u);
1493    dev->opaque = 0;
1494
1495    return 0;
1496}
1497
1498static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
1499{
1500    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
1501
1502    return idx;
1503}
1504
1505static int vhost_user_memslots_limit(struct vhost_dev *dev)
1506{
1507    return VHOST_MEMORY_MAX_NREGIONS;
1508}
1509
1510static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
1511{
1512    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
1513
1514    return virtio_has_feature(dev->protocol_features,
1515                              VHOST_USER_PROTOCOL_F_LOG_SHMFD);
1516}
1517
1518static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
1519{
1520    VhostUserMsg msg = { };
1521
1522    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
1523
 1524    /* If the guest supports GUEST_ANNOUNCE, do nothing */
1525    if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
1526        return 0;
1527    }
1528
 1529    /* If the backend supports VHOST_USER_PROTOCOL_F_RARP, ask it to send the RARP */
1530    if (virtio_has_feature(dev->protocol_features,
1531                           VHOST_USER_PROTOCOL_F_RARP)) {
1532        msg.hdr.request = VHOST_USER_SEND_RARP;
1533        msg.hdr.flags = VHOST_USER_VERSION;
1534        memcpy((char *)&msg.payload.u64, mac_addr, 6);
1535        msg.hdr.size = sizeof(msg.payload.u64);
1536
1537        return vhost_user_write(dev, &msg, NULL, 0);
1538    }
1539    return -1;
1540}
1541
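/*
 * Two adjacent ranges can only be merged into one vhost region if they are
 * backed by the same file descriptor.
 */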
1542static bool vhost_user_can_merge(struct vhost_dev *dev,
1543                                 uint64_t start1, uint64_t size1,
1544                                 uint64_t start2, uint64_t size2)
1545{
1546    ram_addr_t offset;
1547    int mfd, rfd;
1548    MemoryRegion *mr;
1549
1550    mr = memory_region_from_host((void *)(uintptr_t)start1, &offset);
1551    mfd = memory_region_get_fd(mr);
1552
1553    mr = memory_region_from_host((void *)(uintptr_t)start2, &offset);
1554    rfd = memory_region_get_fd(mr);
1555
1556    return mfd == rfd;
1557}
1558
1559static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
1560{
1561    VhostUserMsg msg;
1562    bool reply_supported = virtio_has_feature(dev->protocol_features,
1563                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
1564
1565    if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
1566        return 0;
1567    }
1568
1569    msg.hdr.request = VHOST_USER_NET_SET_MTU;
1570    msg.payload.u64 = mtu;
1571    msg.hdr.size = sizeof(msg.payload.u64);
1572    msg.hdr.flags = VHOST_USER_VERSION;
1573    if (reply_supported) {
1574        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
1575    }
1576
1577    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1578        return -1;
1579    }
1580
 1581    /* If reply_ack is supported, the slave has to ack that the specified MTU is valid */
1582    if (reply_supported) {
1583        return process_message_reply(dev, &msg);
1584    }
1585
1586    return 0;
1587}
1588
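/*
 * Forward an IOTLB update/invalidate message to the backend and wait for
 * its REPLY_ACK.
 */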
1589static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
1590                                            struct vhost_iotlb_msg *imsg)
1591{
1592    VhostUserMsg msg = {
1593        .hdr.request = VHOST_USER_IOTLB_MSG,
1594        .hdr.size = sizeof(msg.payload.iotlb),
1595        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1596        .payload.iotlb = *imsg,
1597    };
1598
1599    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1600        return -EFAULT;
1601    }
1602
1603    return process_message_reply(dev, &msg);
1604}
1605
1606
1607static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
1608{
1609    /* No-op as the receive channel is not dedicated to IOTLB messages. */
1610}
1611
1612static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
1613                                 uint32_t config_len)
1614{
1615    VhostUserMsg msg = {
1616        .hdr.request = VHOST_USER_GET_CONFIG,
1617        .hdr.flags = VHOST_USER_VERSION,
1618        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
1619    };
1620
1621    if (!virtio_has_feature(dev->protocol_features,
1622                VHOST_USER_PROTOCOL_F_CONFIG)) {
1623        return -1;
1624    }
1625
1626    if (config_len > VHOST_USER_MAX_CONFIG_SIZE) {
1627        return -1;
1628    }
1629
1630    msg.payload.config.offset = 0;
1631    msg.payload.config.size = config_len;
1632    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1633        return -1;
1634    }
1635
1636    if (vhost_user_read(dev, &msg) < 0) {
1637        return -1;
1638    }
1639
1640    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
1641        error_report("Received unexpected msg type. Expected %d received %d",
1642                     VHOST_USER_GET_CONFIG, msg.hdr.request);
1643        return -1;
1644    }
1645
1646    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
1647        error_report("Received bad msg size.");
1648        return -1;
1649    }
1650
1651    memcpy(config, msg.payload.config.region, config_len);
1652
1653    return 0;
1654}
1655
1656static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
1657                                 uint32_t offset, uint32_t size, uint32_t flags)
1658{
1659    uint8_t *p;
1660    bool reply_supported = virtio_has_feature(dev->protocol_features,
1661                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
1662
1663    VhostUserMsg msg = {
1664        .hdr.request = VHOST_USER_SET_CONFIG,
1665        .hdr.flags = VHOST_USER_VERSION,
1666        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
1667    };
1668
1669    if (!virtio_has_feature(dev->protocol_features,
1670                VHOST_USER_PROTOCOL_F_CONFIG)) {
1671        return -1;
1672    }
1673
1674    if (reply_supported) {
1675        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
1676    }
1677
1678    if (size > VHOST_USER_MAX_CONFIG_SIZE) {
1679        return -1;
1680    }
1681
1682    msg.payload.config.offset = offset,
1683    msg.payload.config.size = size,
1684    msg.payload.config.flags = flags,
1685    p = msg.payload.config.region;
1686    memcpy(p, data, size);
1687
1688    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1689        return -1;
1690    }
1691
1692    if (reply_supported) {
1693        return process_message_reply(dev, &msg);
1694    }
1695
1696    return 0;
1697}
1698
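/*
 * Ask the backend to create a crypto session (vhost-user-crypto).  The
 * symmetric session parameters and any cipher/auth key material are
 * copied into the request; on success the slave returns the new session
 * id in the same payload.
 */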
1699static int vhost_user_crypto_create_session(struct vhost_dev *dev,
1700                                            void *session_info,
1701                                            uint64_t *session_id)
1702{
1703    bool crypto_session = virtio_has_feature(dev->protocol_features,
1704                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
1705    CryptoDevBackendSymSessionInfo *sess_info = session_info;
1706    VhostUserMsg msg = {
1707        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
1708        .hdr.flags = VHOST_USER_VERSION,
1709        .hdr.size = sizeof(msg.payload.session),
1710    };
1711
1712    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
1713
1714    if (!crypto_session) {
1715        error_report("vhost-user trying to send unhandled ioctl");
1716        return -1;
1717    }
1718
1719    memcpy(&msg.payload.session.session_setup_data, sess_info,
1720              sizeof(CryptoDevBackendSymSessionInfo));
1721    if (sess_info->key_len) {
1722        memcpy(&msg.payload.session.key, sess_info->cipher_key,
1723               sess_info->key_len);
1724    }
1725    if (sess_info->auth_key_len > 0) {
1726        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
1727               sess_info->auth_key_len);
1728    }
1729    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1730        error_report("vhost_user_write() failed, could not create session");
1731        return -1;
1732    }
1733
1734    if (vhost_user_read(dev, &msg) < 0) {
1735        error_report("vhost_user_read() failed, could not create session");
1736        return -1;
1737    }
1738
1739    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
1740        error_report("Received unexpected msg type. Expected %d received %d",
1741                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
1742        return -1;
1743    }
1744
1745    if (msg.hdr.size != sizeof(msg.payload.session)) {
1746        error_report("Received bad msg size.");
1747        return -1;
1748    }
1749
1750    if (msg.payload.session.session_id < 0) {
1751        error_report("Bad session id: %" PRId64 "",
1752                              msg.payload.session.session_id);
1753        return -1;
1754    }
1755    *session_id = msg.payload.session.session_id;
1756
1757    return 0;
1758}
1759
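/*
 * Tear down a previously created crypto session.  This is a one-way
 * request: the session id is passed in the u64 payload and no reply is
 * expected from the slave.
 */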
1760static int
1761vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
1762{
1763    bool crypto_session = virtio_has_feature(dev->protocol_features,
1764                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
1765    VhostUserMsg msg = {
1766        .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
1767        .hdr.flags = VHOST_USER_VERSION,
1768        .hdr.size = sizeof(msg.payload.u64),
1769    };
1770    msg.payload.u64 = session_id;
1771
1772    if (!crypto_session) {
1773        error_report("vhost-user trying to send unhandled ioctl");
1774        return -1;
1775    }
1776
1777    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1778        error_report("vhost_user_write() failed, could not close session");
1779        return -1;
1780    }
1781
1782    return 0;
1783}
1784
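/*
 * vhost-user can only share guest memory that is backed by a file
 * descriptor (e.g. memory-backend-file or memory-backend-memfd), since
 * regions are passed to the slave as fds in VHOST_USER_SET_MEM_TABLE.
 * Filter out everything else.
 */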
1785static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
1786                                          MemoryRegionSection *section)
1787{
1788    return memory_region_get_fd(section->mr) >= 0;
1793}
1794
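/*
 * Request the shared inflight-I/O buffer from the backend with
 * VHOST_USER_GET_INFLIGHT_FD.  The slave returns an fd plus a
 * size/offset which are mmap()ed here, so that in-flight requests can
 * be resubmitted if the backend restarts.  A zero mmap_size means the
 * slave has nothing to share.
 */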
1795static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
1796                                      uint16_t queue_size,
1797                                      struct vhost_inflight *inflight)
1798{
1799    void *addr;
1800    int fd;
1801    struct vhost_user *u = dev->opaque;
1802    CharBackend *chr = u->user->chr;
1803    VhostUserMsg msg = {
1804        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
1805        .hdr.flags = VHOST_USER_VERSION,
1806        .payload.inflight.num_queues = dev->nvqs,
1807        .payload.inflight.queue_size = queue_size,
1808        .hdr.size = sizeof(msg.payload.inflight),
1809    };
1810
1811    if (!virtio_has_feature(dev->protocol_features,
1812                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
1813        return 0;
1814    }
1815
1816    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1817        return -1;
1818    }
1819
1820    if (vhost_user_read(dev, &msg) < 0) {
1821        return -1;
1822    }
1823
1824    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
1825        error_report("Received unexpected msg type. "
1826                     "Expected %d received %d",
1827                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
1828        return -1;
1829    }
1830
1831    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
1832        error_report("Received bad msg size.");
1833        return -1;
1834    }
1835
1836    if (!msg.payload.inflight.mmap_size) {
1837        return 0;
1838    }
1839
1840    fd = qemu_chr_fe_get_msgfd(chr);
1841    if (fd < 0) {
1842        error_report("Failed to get inflight fd");
1843        return -1;
1844    }
1845
1846    addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
1847                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);
1848
1849    if (addr == MAP_FAILED) {
1850        error_report("Failed to mmap inflight fd");
1851        close(fd);
1852        return -1;
1853    }
1854
1855    inflight->addr = addr;
1856    inflight->fd = fd;
1857    inflight->size = msg.payload.inflight.mmap_size;
1858    inflight->offset = msg.payload.inflight.mmap_offset;
1859    inflight->queue_size = queue_size;
1860
1861    return 0;
1862}
1863
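/*
 * Hand a previously obtained inflight buffer back to the (possibly
 * restarted) backend with VHOST_USER_SET_INFLIGHT_FD, passing the fd as
 * ancillary data alongside its size, offset and queue layout.
 */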
1864static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
1865                                      struct vhost_inflight *inflight)
1866{
1867    VhostUserMsg msg = {
1868        .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
1869        .hdr.flags = VHOST_USER_VERSION,
1870        .payload.inflight.mmap_size = inflight->size,
1871        .payload.inflight.mmap_offset = inflight->offset,
1872        .payload.inflight.num_queues = dev->nvqs,
1873        .payload.inflight.queue_size = inflight->queue_size,
1874        .hdr.size = sizeof(msg.payload.inflight),
1875    };
1876
1877    if (!virtio_has_feature(dev->protocol_features,
1878                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
1879        return 0;
1880    }
1881
1882    if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) {
1883        return -1;
1884    }
1885
1886    return 0;
1887}
1888
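/*
 * Bind a VhostUserState to its chardev backend.  Fails if the state was
 * already initialized with a chardev.  Callers (e.g. the vhost-user net
 * backend) are expected to pair this with vhost_user_cleanup() on
 * teardown.
 */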
1889bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
1890{
1891    if (user->chr) {
1892        error_setg(errp, "vhost-user state is already initialized");
1893        return false;
1894    }
1895    user->chr = chr;
1896    return true;
1897}
1898
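/*
 * Release any mmap()ed host notifier regions and detach the chardev.
 * Safe to call on an uninitialized state (it simply returns).
 */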
1899void vhost_user_cleanup(VhostUserState *user)
1900{
1901    int i;
1902
1903    if (!user->chr) {
1904        return;
1905    }
1906
1907    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1908        if (user->notifier[i].addr) {
1909            object_unparent(OBJECT(&user->notifier[i].mr));
1910            munmap(user->notifier[i].addr, qemu_real_host_page_size);
1911            user->notifier[i].addr = NULL;
1912        }
1913    }
1914    user->chr = NULL;
1915}
1916
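/* vhost backend callbacks registered for VHOST_BACKEND_TYPE_USER */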
1917const VhostOps user_ops = {
1918        .backend_type = VHOST_BACKEND_TYPE_USER,
1919        .vhost_backend_init = vhost_user_backend_init,
1920        .vhost_backend_cleanup = vhost_user_backend_cleanup,
1921        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
1922        .vhost_set_log_base = vhost_user_set_log_base,
1923        .vhost_set_mem_table = vhost_user_set_mem_table,
1924        .vhost_set_vring_addr = vhost_user_set_vring_addr,
1925        .vhost_set_vring_endian = vhost_user_set_vring_endian,
1926        .vhost_set_vring_num = vhost_user_set_vring_num,
1927        .vhost_set_vring_base = vhost_user_set_vring_base,
1928        .vhost_get_vring_base = vhost_user_get_vring_base,
1929        .vhost_set_vring_kick = vhost_user_set_vring_kick,
1930        .vhost_set_vring_call = vhost_user_set_vring_call,
1931        .vhost_set_features = vhost_user_set_features,
1932        .vhost_get_features = vhost_user_get_features,
1933        .vhost_set_owner = vhost_user_set_owner,
1934        .vhost_reset_device = vhost_user_reset_device,
1935        .vhost_get_vq_index = vhost_user_get_vq_index,
1936        .vhost_set_vring_enable = vhost_user_set_vring_enable,
1937        .vhost_requires_shm_log = vhost_user_requires_shm_log,
1938        .vhost_migration_done = vhost_user_migration_done,
1939        .vhost_backend_can_merge = vhost_user_can_merge,
1940        .vhost_net_set_mtu = vhost_user_net_set_mtu,
1941        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
1942        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
1943        .vhost_get_config = vhost_user_get_config,
1944        .vhost_set_config = vhost_user_set_config,
1945        .vhost_crypto_create_session = vhost_user_crypto_create_session,
1946        .vhost_crypto_close_session = vhost_user_crypto_close_session,
1947        .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
1948        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
1949        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
1950};
1951