qemu/contrib/libvhost-user/libvhost-user.c
   1/*
   2 * Vhost User library
   3 *
   4 * Copyright IBM, Corp. 2007
   5 * Copyright (c) 2016 Red Hat, Inc.
   6 *
   7 * Authors:
   8 *  Anthony Liguori <aliguori@us.ibm.com>
   9 *  Marc-André Lureau <mlureau@redhat.com>
  10 *  Victor Kaplansky <victork@redhat.com>
  11 *
  12 * This work is licensed under the terms of the GNU GPL, version 2 or
  13 * later.  See the COPYING file in the top-level directory.
  14 */
  15
  16/* this code avoids GLib dependency */
  17#include <stdlib.h>
  18#include <stdio.h>
  19#include <unistd.h>
  20#include <stdarg.h>
  21#include <errno.h>
  22#include <string.h>
  23#include <assert.h>
  24#include <inttypes.h>
  25#include <sys/types.h>
  26#include <sys/socket.h>
  27#include <sys/eventfd.h>
  28#include <sys/mman.h>
  29#include "qemu/compiler.h"
  30
  31#if defined(__linux__)
  32#include <sys/syscall.h>
  33#include <fcntl.h>
  34#include <sys/ioctl.h>
  35#include <linux/vhost.h>
  36
  37#ifdef __NR_userfaultfd
  38#include <linux/userfaultfd.h>
  39#endif
  40
  41#endif
  42
  43#include "qemu/atomic.h"
  44#include "qemu/osdep.h"
  45#include "qemu/memfd.h"
  46
  47#include "libvhost-user.h"
  48
  49/* usually provided by GLib */
  50#ifndef MIN
  51#define MIN(x, y) ({                            \
  52            typeof(x) _min1 = (x);              \
  53            typeof(y) _min2 = (y);              \
  54            (void) (&_min1 == &_min2);          \
  55            _min1 < _min2 ? _min1 : _min2; })
  56#endif
  57
  58/* Round number down to multiple */
  59#define ALIGN_DOWN(n, m) ((n) / (m) * (m))
  60
  61/* Round number up to multiple */
  62#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
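    /* e.g. ALIGN_DOWN(17, 8) == 16 and ALIGN_UP(17, 8) == 24 */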
  63
  64/* Align each region to cache line size in inflight buffer */
  65#define INFLIGHT_ALIGNMENT 64
  66
  67/* The version of inflight buffer */
  68#define INFLIGHT_VERSION 1
  69
  70#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
  71
  72/* The version of the protocol we support */
  73#define VHOST_USER_VERSION 1
  74#define LIBVHOST_USER_DEBUG 0
  75
  76#define DPRINT(...)                             \
  77    do {                                        \
  78        if (LIBVHOST_USER_DEBUG) {              \
  79            fprintf(stderr, __VA_ARGS__);        \
  80        }                                       \
  81    } while (0)
  82
  83static inline
  84bool has_feature(uint64_t features, unsigned int fbit)
  85{
  86    assert(fbit < 64);
  87    return !!(features & (1ULL << fbit));
  88}
  89
  90static inline
  91bool vu_has_feature(VuDev *dev,
  92                    unsigned int fbit)
  93{
  94    return has_feature(dev->features, fbit);
  95}
  96
  97static const char *
  98vu_request_to_string(unsigned int req)
  99{
 100#define REQ(req) [req] = #req
 101    static const char *vu_request_str[] = {
 102        REQ(VHOST_USER_NONE),
 103        REQ(VHOST_USER_GET_FEATURES),
 104        REQ(VHOST_USER_SET_FEATURES),
 105        REQ(VHOST_USER_SET_OWNER),
 106        REQ(VHOST_USER_RESET_OWNER),
 107        REQ(VHOST_USER_SET_MEM_TABLE),
 108        REQ(VHOST_USER_SET_LOG_BASE),
 109        REQ(VHOST_USER_SET_LOG_FD),
 110        REQ(VHOST_USER_SET_VRING_NUM),
 111        REQ(VHOST_USER_SET_VRING_ADDR),
 112        REQ(VHOST_USER_SET_VRING_BASE),
 113        REQ(VHOST_USER_GET_VRING_BASE),
 114        REQ(VHOST_USER_SET_VRING_KICK),
 115        REQ(VHOST_USER_SET_VRING_CALL),
 116        REQ(VHOST_USER_SET_VRING_ERR),
 117        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
 118        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
 119        REQ(VHOST_USER_GET_QUEUE_NUM),
 120        REQ(VHOST_USER_SET_VRING_ENABLE),
 121        REQ(VHOST_USER_SEND_RARP),
 122        REQ(VHOST_USER_NET_SET_MTU),
 123        REQ(VHOST_USER_SET_SLAVE_REQ_FD),
 124        REQ(VHOST_USER_IOTLB_MSG),
 125        REQ(VHOST_USER_SET_VRING_ENDIAN),
 126        REQ(VHOST_USER_GET_CONFIG),
 127        REQ(VHOST_USER_SET_CONFIG),
 128        REQ(VHOST_USER_POSTCOPY_ADVISE),
 129        REQ(VHOST_USER_POSTCOPY_LISTEN),
 130        REQ(VHOST_USER_POSTCOPY_END),
 131        REQ(VHOST_USER_GET_INFLIGHT_FD),
 132        REQ(VHOST_USER_SET_INFLIGHT_FD),
 133        REQ(VHOST_USER_MAX),
 134    };
 135#undef REQ
 136
 137    if (req < VHOST_USER_MAX) {
 138        return vu_request_str[req];
 139    } else {
 140        return "unknown";
 141    }
 142}
 143
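    /*
     * Mark the device as broken and hand the formatted error message to the
     * panic callback registered with vu_init().
     */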
 144static void
 145vu_panic(VuDev *dev, const char *msg, ...)
 146{
 147    char *buf = NULL;
 148    va_list ap;
 149
 150    va_start(ap, msg);
 151    if (vasprintf(&buf, msg, ap) < 0) {
 152        buf = NULL;
 153    }
 154    va_end(ap);
 155
 156    dev->broken = true;
 157    dev->panic(dev, buf);
 158    free(buf);
 159
 160    /* FIXME: find a way to call virtio_error? */
 161}
 162
 163/* Translate guest physical address to our virtual address.  */
 164void *
 165vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
 166{
 167    int i;
 168
 169    if (*plen == 0) {
 170        return NULL;
 171    }
 172
 173    /* Find matching memory region.  */
 174    for (i = 0; i < dev->nregions; i++) {
 175        VuDevRegion *r = &dev->regions[i];
 176
 177        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
 178            if ((guest_addr + *plen) > (r->gpa + r->size)) {
 179                *plen = r->gpa + r->size - guest_addr;
 180            }
 181            return (void *)(uintptr_t)
 182                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
 183        }
 184    }
 185
 186    return NULL;
 187}
 188
 189/* Translate qemu virtual address to our virtual address.  */
 190static void *
 191qva_to_va(VuDev *dev, uint64_t qemu_addr)
 192{
 193    int i;
 194
 195    /* Find matching memory region.  */
 196    for (i = 0; i < dev->nregions; i++) {
 197        VuDevRegion *r = &dev->regions[i];
 198
 199        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
 200            return (void *)(uintptr_t)
 201                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
 202        }
 203    }
 204
 205    return NULL;
 206}
 207
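    /* Close every file descriptor that arrived attached to this message. */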
 208static void
 209vmsg_close_fds(VhostUserMsg *vmsg)
 210{
 211    int i;
 212
 213    for (i = 0; i < vmsg->fd_num; i++) {
 214        close(vmsg->fds[i]);
 215    }
 216}
 217
 218/* A test to see if we have userfault available */
 219static bool
 220have_userfault(void)
 221{
 222#if defined(__linux__) && defined(__NR_userfaultfd) &&\
 223        defined(UFFD_FEATURE_MISSING_SHMEM) &&\
 224        defined(UFFD_FEATURE_MISSING_HUGETLBFS)
   225    /* Now test that the kernel we're running on really has the features */
 226    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 227    struct uffdio_api api_struct;
 228    if (ufd < 0) {
 229        return false;
 230    }
 231
 232    api_struct.api = UFFD_API;
 233    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
 234                          UFFD_FEATURE_MISSING_HUGETLBFS;
 235    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
 236        close(ufd);
 237        return false;
 238    }
 239    close(ufd);
 240    return true;
 241
 242#else
 243    return false;
 244#endif
 245}
 246
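    /*
     * Read one vhost-user message from conn_fd: the fixed-size header is
     * received with recvmsg() so any SCM_RIGHTS file descriptors can be
     * collected into vmsg->fds, then the variable-size payload (if any) is
     * read separately.  Panics the device and returns false on error.
     */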
 247static bool
 248vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 249{
 250    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
 251    struct iovec iov = {
 252        .iov_base = (char *)vmsg,
 253        .iov_len = VHOST_USER_HDR_SIZE,
 254    };
 255    struct msghdr msg = {
 256        .msg_iov = &iov,
 257        .msg_iovlen = 1,
 258        .msg_control = control,
 259        .msg_controllen = sizeof(control),
 260    };
 261    size_t fd_size;
 262    struct cmsghdr *cmsg;
 263    int rc;
 264
 265    do {
 266        rc = recvmsg(conn_fd, &msg, 0);
 267    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 268
 269    if (rc < 0) {
 270        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
 271        return false;
 272    }
 273
 274    vmsg->fd_num = 0;
 275    for (cmsg = CMSG_FIRSTHDR(&msg);
 276         cmsg != NULL;
 277         cmsg = CMSG_NXTHDR(&msg, cmsg))
 278    {
 279        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
 280            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
 281            vmsg->fd_num = fd_size / sizeof(int);
 282            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
 283            break;
 284        }
 285    }
 286
 287    if (vmsg->size > sizeof(vmsg->payload)) {
 288        vu_panic(dev,
 289                 "Error: too big message request: %d, size: vmsg->size: %u, "
 290                 "while sizeof(vmsg->payload) = %zu\n",
 291                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
 292        goto fail;
 293    }
 294
 295    if (vmsg->size) {
 296        do {
 297            rc = read(conn_fd, &vmsg->payload, vmsg->size);
 298        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 299
 300        if (rc <= 0) {
 301            vu_panic(dev, "Error while reading: %s", strerror(errno));
 302            goto fail;
 303        }
 304
 305        assert(rc == vmsg->size);
 306    }
 307
 308    return true;
 309
 310fail:
 311    vmsg_close_fds(vmsg);
 312
 313    return false;
 314}
 315
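    /*
     * Send a vhost-user message on conn_fd: the header goes out via
     * sendmsg() together with any file descriptors in vmsg->fds, and the
     * payload (vmsg->data if set, otherwise the in-place payload) follows
     * with a plain write().
     */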
 316static bool
 317vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 318{
 319    int rc;
 320    uint8_t *p = (uint8_t *)vmsg;
 321    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
 322    struct iovec iov = {
 323        .iov_base = (char *)vmsg,
 324        .iov_len = VHOST_USER_HDR_SIZE,
 325    };
 326    struct msghdr msg = {
 327        .msg_iov = &iov,
 328        .msg_iovlen = 1,
 329        .msg_control = control,
 330    };
 331    struct cmsghdr *cmsg;
 332
 333    memset(control, 0, sizeof(control));
 334    assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS);
 335    if (vmsg->fd_num > 0) {
 336        size_t fdsize = vmsg->fd_num * sizeof(int);
 337        msg.msg_controllen = CMSG_SPACE(fdsize);
 338        cmsg = CMSG_FIRSTHDR(&msg);
 339        cmsg->cmsg_len = CMSG_LEN(fdsize);
 340        cmsg->cmsg_level = SOL_SOCKET;
 341        cmsg->cmsg_type = SCM_RIGHTS;
 342        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
 343    } else {
 344        msg.msg_controllen = 0;
 345    }
 346
 347    do {
 348        rc = sendmsg(conn_fd, &msg, 0);
 349    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 350
 351    if (vmsg->size) {
 352        do {
 353            if (vmsg->data) {
 354                rc = write(conn_fd, vmsg->data, vmsg->size);
 355            } else {
 356                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
 357            }
 358        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 359    }
 360
 361    if (rc <= 0) {
 362        vu_panic(dev, "Error while writing: %s", strerror(errno));
 363        return false;
 364    }
 365
 366    return true;
 367}
 368
 369static bool
 370vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 371{
 372    /* Set the version in the flags when sending the reply */
 373    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
 374    vmsg->flags |= VHOST_USER_VERSION;
 375    vmsg->flags |= VHOST_USER_REPLY_MASK;
 376
 377    return vu_message_write(dev, conn_fd, vmsg);
 378}
 379
 380static bool
 381vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
 382{
 383    VhostUserMsg msg_reply;
 384
 385    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
 386        return true;
 387    }
 388
 389    if (!vu_message_read(dev, dev->slave_fd, &msg_reply)) {
 390        return false;
 391    }
 392
 393    if (msg_reply.request != vmsg->request) {
 394        DPRINT("Received unexpected msg type. Expected %d received %d",
 395               vmsg->request, msg_reply.request);
 396        return false;
 397    }
 398
 399    return msg_reply.payload.u64 == 0;
 400}
 401
 402/* Kick the log_call_fd if required. */
 403static void
 404vu_log_kick(VuDev *dev)
 405{
 406    if (dev->log_call_fd != -1) {
 407        DPRINT("Kicking the QEMU's log...\n");
 408        if (eventfd_write(dev->log_call_fd, 1) < 0) {
 409            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
 410        }
 411    }
 412}
 413
 414static void
 415vu_log_page(uint8_t *log_table, uint64_t page)
 416{
 417    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
 418    atomic_or(&log_table[page / 8], 1 << (page % 8));
 419}
 420
 421static void
 422vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
 423{
 424    uint64_t page;
 425
 426    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
 427        !dev->log_table || !length) {
 428        return;
 429    }
 430
 431    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));
 432
 433    page = address / VHOST_LOG_PAGE;
 434    while (page * VHOST_LOG_PAGE < address + length) {
 435        vu_log_page(dev->log_table, page);
  436        page += 1;
 437    }
 438
 439    vu_log_kick(dev);
 440}
 441
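    /*
     * Watch callback for a virtqueue's kick eventfd: drain the eventfd and
     * run the queue handler, or panic and drop the watch if the read fails.
     */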
 442static void
 443vu_kick_cb(VuDev *dev, int condition, void *data)
 444{
 445    int index = (intptr_t)data;
 446    VuVirtq *vq = &dev->vq[index];
 447    int sock = vq->kick_fd;
 448    eventfd_t kick_data;
 449    ssize_t rc;
 450
 451    rc = eventfd_read(sock, &kick_data);
 452    if (rc == -1) {
 453        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
 454        dev->remove_watch(dev, dev->vq[index].kick_fd);
 455    } else {
 456        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
 457               kick_data, vq->handler, index);
 458        if (vq->handler) {
 459            vq->handler(dev, index);
 460        }
 461    }
 462}
 463
 464static bool
 465vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 466{
 467    vmsg->payload.u64 =
 468        1ULL << VHOST_F_LOG_ALL |
 469        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
 470
 471    if (dev->iface->get_features) {
 472        vmsg->payload.u64 |= dev->iface->get_features(dev);
 473    }
 474
 475    vmsg->size = sizeof(vmsg->payload.u64);
 476    vmsg->fd_num = 0;
 477
 478    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
 479
 480    return true;
 481}
 482
 483static void
 484vu_set_enable_all_rings(VuDev *dev, bool enabled)
 485{
 486    int i;
 487
 488    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
 489        dev->vq[i].enable = enabled;
 490    }
 491}
 492
 493static bool
 494vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 495{
 496    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
 497
 498    dev->features = vmsg->payload.u64;
 499
  500    if (!vu_has_feature(dev, VHOST_USER_F_PROTOCOL_FEATURES)) {
 501        vu_set_enable_all_rings(dev, true);
 502    }
 503
 504    if (dev->iface->set_features) {
 505        dev->iface->set_features(dev, dev->features);
 506    }
 507
 508    return false;
 509}
 510
 511static bool
 512vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
 513{
 514    return false;
 515}
 516
 517static void
 518vu_close_log(VuDev *dev)
 519{
 520    if (dev->log_table) {
 521        if (munmap(dev->log_table, dev->log_size) != 0) {
 522            perror("close log munmap() error");
 523        }
 524
 525        dev->log_table = NULL;
 526    }
 527    if (dev->log_call_fd != -1) {
 528        close(dev->log_call_fd);
 529        dev->log_call_fd = -1;
 530    }
 531}
 532
 533static bool
 534vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
 535{
 536    vu_set_enable_all_rings(dev, false);
 537
 538    return false;
 539}
 540
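    /*
     * Postcopy variant of SET_MEM_TABLE: map each region PROT_NONE first,
     * report the mapped addresses back to QEMU, wait for its acknowledgement,
     * and only then register the ranges with userfaultfd and make them
     * readable/writable.
     */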
 541static bool
 542vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 543{
 544    int i;
 545    VhostUserMemory *memory = &vmsg->payload.memory;
 546    dev->nregions = memory->nregions;
 547
 548    DPRINT("Nregions: %d\n", memory->nregions);
 549    for (i = 0; i < dev->nregions; i++) {
 550        void *mmap_addr;
 551        VhostUserMemoryRegion *msg_region = &memory->regions[i];
 552        VuDevRegion *dev_region = &dev->regions[i];
 553
 554        DPRINT("Region %d\n", i);
 555        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
 556               msg_region->guest_phys_addr);
 557        DPRINT("    memory_size:     0x%016"PRIx64"\n",
 558               msg_region->memory_size);
 559        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
 560               msg_region->userspace_addr);
 561        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
 562               msg_region->mmap_offset);
 563
 564        dev_region->gpa = msg_region->guest_phys_addr;
 565        dev_region->size = msg_region->memory_size;
 566        dev_region->qva = msg_region->userspace_addr;
 567        dev_region->mmap_offset = msg_region->mmap_offset;
 568
  569        /* We don't use the offset argument of mmap() since the
 570         * mapped address has to be page aligned, and we use huge
 571         * pages.
 572         * In postcopy we're using PROT_NONE here to catch anyone
 573         * accessing it before we userfault
 574         */
 575        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
 576                         PROT_NONE, MAP_SHARED,
 577                         vmsg->fds[i], 0);
 578
 579        if (mmap_addr == MAP_FAILED) {
 580            vu_panic(dev, "region mmap error: %s", strerror(errno));
 581        } else {
 582            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
 583            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
 584                   dev_region->mmap_addr);
 585        }
 586
 587        /* Return the address to QEMU so that it can translate the ufd
 588         * fault addresses back.
 589         */
 590        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
 591                                                 dev_region->mmap_offset);
 592        close(vmsg->fds[i]);
 593    }
 594
 595    /* Send the message back to qemu with the addresses filled in */
 596    vmsg->fd_num = 0;
 597    if (!vu_send_reply(dev, dev->sock, vmsg)) {
 598        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
 599        return false;
 600    }
 601
 602    /* Wait for QEMU to confirm that it's registered the handler for the
 603     * faults.
 604     */
 605    if (!vu_message_read(dev, dev->sock, vmsg) ||
 606        vmsg->size != sizeof(vmsg->payload.u64) ||
 607        vmsg->payload.u64 != 0) {
 608        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
 609        return false;
 610    }
 611
 612    /* OK, now we can go and register the memory and generate faults */
 613    for (i = 0; i < dev->nregions; i++) {
 614        VuDevRegion *dev_region = &dev->regions[i];
 615        int ret;
 616#ifdef UFFDIO_REGISTER
  617        /* We should already have an open ufd. Register each memory
  618         * range with the ufd.
  619         * Discard any mapping we have here; note I can't use MADV_REMOVE
  620         * or fallocate to make the hole since I don't want to lose
  621         * data that's already arrived in the shared process.
  622         * TODO: How to handle hugepages?
  623         */
 624        ret = madvise((void *)dev_region->mmap_addr,
 625                      dev_region->size + dev_region->mmap_offset,
 626                      MADV_DONTNEED);
 627        if (ret) {
 628            fprintf(stderr,
 629                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
 630                    __func__, i, strerror(errno));
 631        }
  632        /* Turn off transparent hugepages so we don't get lost wakeups
  633         * in neighbouring pages.
  634         * TODO: Turn this back on later.
 635         */
 636        ret = madvise((void *)dev_region->mmap_addr,
 637                      dev_region->size + dev_region->mmap_offset,
 638                      MADV_NOHUGEPAGE);
 639        if (ret) {
 640            /* Note: This can happen legally on kernels that are configured
 641             * without madvise'able hugepages
 642             */
 643            fprintf(stderr,
 644                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
 645                    __func__, i, strerror(errno));
 646        }
 647        struct uffdio_register reg_struct;
 648        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
 649        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
 650        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
 651
 652        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
 653            vu_panic(dev, "%s: Failed to userfault region %d "
 654                          "@%p + size:%zx offset: %zx: (ufd=%d)%s\n",
 655                     __func__, i,
 656                     dev_region->mmap_addr,
 657                     dev_region->size, dev_region->mmap_offset,
 658                     dev->postcopy_ufd, strerror(errno));
 659            return false;
 660        }
 661        if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
 662            vu_panic(dev, "%s Region (%d) doesn't support COPY",
 663                     __func__, i);
 664            return false;
 665        }
 666        DPRINT("%s: region %d: Registered userfault for %llx + %llx\n",
 667                __func__, i, reg_struct.range.start, reg_struct.range.len);
 668        /* Now it's registered we can let the client at it */
 669        if (mprotect((void *)dev_region->mmap_addr,
 670                     dev_region->size + dev_region->mmap_offset,
 671                     PROT_READ | PROT_WRITE)) {
 672            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
 673                     i, strerror(errno));
 674            return false;
 675        }
 676        /* TODO: Stash 'zero' support flags somewhere */
 677#endif
 678    }
 679
 680    return false;
 681}
 682
 683static bool
 684vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
 685{
 686    int i;
 687    VhostUserMemory *memory = &vmsg->payload.memory;
 688
 689    for (i = 0; i < dev->nregions; i++) {
 690        VuDevRegion *r = &dev->regions[i];
 691        void *m = (void *) (uintptr_t) r->mmap_addr;
 692
 693        if (m) {
 694            munmap(m, r->size + r->mmap_offset);
 695        }
 696    }
 697    dev->nregions = memory->nregions;
 698
 699    if (dev->postcopy_listening) {
 700        return vu_set_mem_table_exec_postcopy(dev, vmsg);
 701    }
 702
 703    DPRINT("Nregions: %d\n", memory->nregions);
 704    for (i = 0; i < dev->nregions; i++) {
 705        void *mmap_addr;
 706        VhostUserMemoryRegion *msg_region = &memory->regions[i];
 707        VuDevRegion *dev_region = &dev->regions[i];
 708
 709        DPRINT("Region %d\n", i);
 710        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
 711               msg_region->guest_phys_addr);
 712        DPRINT("    memory_size:     0x%016"PRIx64"\n",
 713               msg_region->memory_size);
 714        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
 715               msg_region->userspace_addr);
 716        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
 717               msg_region->mmap_offset);
 718
 719        dev_region->gpa = msg_region->guest_phys_addr;
 720        dev_region->size = msg_region->memory_size;
 721        dev_region->qva = msg_region->userspace_addr;
 722        dev_region->mmap_offset = msg_region->mmap_offset;
 723
  724        /* We don't use the offset argument of mmap() since the
 725         * mapped address has to be page aligned, and we use huge
 726         * pages.  */
 727        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
 728                         PROT_READ | PROT_WRITE, MAP_SHARED,
 729                         vmsg->fds[i], 0);
 730
 731        if (mmap_addr == MAP_FAILED) {
 732            vu_panic(dev, "region mmap error: %s", strerror(errno));
 733        } else {
 734            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
 735            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
 736                   dev_region->mmap_addr);
 737        }
 738
 739        close(vmsg->fds[i]);
 740    }
 741
 742    return false;
 743}
 744
 745static bool
 746vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
 747{
 748    int fd;
 749    uint64_t log_mmap_size, log_mmap_offset;
 750    void *rc;
 751
 752    if (vmsg->fd_num != 1 ||
 753        vmsg->size != sizeof(vmsg->payload.log)) {
 754        vu_panic(dev, "Invalid log_base message");
 755        return true;
 756    }
 757
 758    fd = vmsg->fds[0];
 759    log_mmap_offset = vmsg->payload.log.mmap_offset;
 760    log_mmap_size = vmsg->payload.log.mmap_size;
 761    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
 762    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);
 763
 764    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
 765              log_mmap_offset);
 766    close(fd);
 767    if (rc == MAP_FAILED) {
 768        perror("log mmap error");
 769    }
 770
 771    if (dev->log_table) {
 772        munmap(dev->log_table, dev->log_size);
 773    }
 774    dev->log_table = rc;
 775    dev->log_size = log_mmap_size;
 776
 777    vmsg->size = sizeof(vmsg->payload.u64);
 778    vmsg->fd_num = 0;
 779
 780    return true;
 781}
 782
 783static bool
 784vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
 785{
 786    if (vmsg->fd_num != 1) {
 787        vu_panic(dev, "Invalid log_fd message");
 788        return false;
 789    }
 790
 791    if (dev->log_call_fd != -1) {
 792        close(dev->log_call_fd);
 793    }
 794    dev->log_call_fd = vmsg->fds[0];
 795    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);
 796
 797    return false;
 798}
 799
 800static bool
 801vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
 802{
 803    unsigned int index = vmsg->payload.state.index;
 804    unsigned int num = vmsg->payload.state.num;
 805
 806    DPRINT("State.index: %d\n", index);
 807    DPRINT("State.num:   %d\n", num);
 808    dev->vq[index].vring.num = num;
 809
 810    return false;
 811}
 812
 813static bool
 814vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
 815{
 816    struct vhost_vring_addr *vra = &vmsg->payload.addr;
 817    unsigned int index = vra->index;
 818    VuVirtq *vq = &dev->vq[index];
 819
 820    DPRINT("vhost_vring_addr:\n");
 821    DPRINT("    index:  %d\n", vra->index);
 822    DPRINT("    flags:  %d\n", vra->flags);
 823    DPRINT("    desc_user_addr:   0x%016" PRIx64 "\n", vra->desc_user_addr);
 824    DPRINT("    used_user_addr:   0x%016" PRIx64 "\n", vra->used_user_addr);
 825    DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", vra->avail_user_addr);
 826    DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", vra->log_guest_addr);
 827
 828    vq->vring.flags = vra->flags;
 829    vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
 830    vq->vring.used = qva_to_va(dev, vra->used_user_addr);
 831    vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
 832    vq->vring.log_guest_addr = vra->log_guest_addr;
 833
 834    DPRINT("Setting virtq addresses:\n");
 835    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
 836    DPRINT("    vring_used  at %p\n", vq->vring.used);
 837    DPRINT("    vring_avail at %p\n", vq->vring.avail);
 838
 839    if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
 840        vu_panic(dev, "Invalid vring_addr message");
 841        return false;
 842    }
 843
 844    vq->used_idx = vq->vring.used->idx;
 845
 846    if (vq->last_avail_idx != vq->used_idx) {
 847        bool resume = dev->iface->queue_is_processed_in_order &&
 848            dev->iface->queue_is_processed_in_order(dev, index);
 849
 850        DPRINT("Last avail index != used index: %u != %u%s\n",
 851               vq->last_avail_idx, vq->used_idx,
 852               resume ? ", resuming" : "");
 853
 854        if (resume) {
 855            vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
 856        }
 857    }
 858
 859    return false;
 860}
 861
 862static bool
 863vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
 864{
 865    unsigned int index = vmsg->payload.state.index;
 866    unsigned int num = vmsg->payload.state.num;
 867
 868    DPRINT("State.index: %d\n", index);
 869    DPRINT("State.num:   %d\n", num);
 870    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;
 871
 872    return false;
 873}
 874
 875static bool
 876vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
 877{
 878    unsigned int index = vmsg->payload.state.index;
 879
 880    DPRINT("State.index: %d\n", index);
 881    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
 882    vmsg->size = sizeof(vmsg->payload.state);
 883
 884    dev->vq[index].started = false;
 885    if (dev->iface->queue_set_started) {
 886        dev->iface->queue_set_started(dev, index, false);
 887    }
 888
 889    if (dev->vq[index].call_fd != -1) {
 890        close(dev->vq[index].call_fd);
 891        dev->vq[index].call_fd = -1;
 892    }
 893    if (dev->vq[index].kick_fd != -1) {
 894        dev->remove_watch(dev, dev->vq[index].kick_fd);
 895        close(dev->vq[index].kick_fd);
 896        dev->vq[index].kick_fd = -1;
 897    }
 898
 899    return true;
 900}
 901
 902static bool
 903vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
 904{
 905    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
 906
 907    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
 908        vmsg_close_fds(vmsg);
 909        vu_panic(dev, "Invalid queue index: %u", index);
 910        return false;
 911    }
 912
 913    if (vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK ||
 914        vmsg->fd_num != 1) {
 915        vmsg_close_fds(vmsg);
 916        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
 917        return false;
 918    }
 919
 920    return true;
 921}
 922
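    /*
     * qsort() comparator for the resubmit list: entries with the highest
     * submission counter sort first, so vq->counter can resume just past
     * the newest inflight descriptor.
     */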
 923static int
 924inflight_desc_compare(const void *a, const void *b)
 925{
 926    VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
 927                        *desc1 = (VuVirtqInflightDesc *)b;
 928
 929    if (desc1->counter > desc0->counter &&
 930        (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
 931        return 1;
 932    }
 933
 934    return -1;
 935}
 936
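    /*
     * When the inflight shmfd protocol feature is in use, reconcile the
     * shared inflight buffer with the vring after a restart: count
     * descriptors still marked inflight, rebuild the resubmit list sorted
     * by submission counter, and kick the queue so processing resumes.
     */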
 937static int
 938vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
 939{
 940    int i = 0;
 941
 942    if (!has_feature(dev->protocol_features,
 943        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
 944        return 0;
 945    }
 946
 947    if (unlikely(!vq->inflight)) {
 948        return -1;
 949    }
 950
 951    if (unlikely(!vq->inflight->version)) {
 952        /* initialize the buffer */
 953        vq->inflight->version = INFLIGHT_VERSION;
 954        return 0;
 955    }
 956
 957    vq->used_idx = vq->vring.used->idx;
 958    vq->resubmit_num = 0;
 959    vq->resubmit_list = NULL;
 960    vq->counter = 0;
 961
 962    if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
 963        vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;
 964
 965        barrier();
 966
 967        vq->inflight->used_idx = vq->used_idx;
 968    }
 969
 970    for (i = 0; i < vq->inflight->desc_num; i++) {
 971        if (vq->inflight->desc[i].inflight == 1) {
 972            vq->inuse++;
 973        }
 974    }
 975
 976    vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;
 977
 978    if (vq->inuse) {
 979        vq->resubmit_list = malloc(sizeof(VuVirtqInflightDesc) * vq->inuse);
 980        if (!vq->resubmit_list) {
 981            return -1;
 982        }
 983
 984        for (i = 0; i < vq->inflight->desc_num; i++) {
 985            if (vq->inflight->desc[i].inflight) {
 986                vq->resubmit_list[vq->resubmit_num].index = i;
 987                vq->resubmit_list[vq->resubmit_num].counter =
 988                                        vq->inflight->desc[i].counter;
 989                vq->resubmit_num++;
 990            }
 991        }
 992
 993        if (vq->resubmit_num > 1) {
 994            qsort(vq->resubmit_list, vq->resubmit_num,
 995                  sizeof(VuVirtqInflightDesc), inflight_desc_compare);
 996        }
 997        vq->counter = vq->resubmit_list[0].counter + 1;
 998    }
 999
1000    /* in case of I/O hang after reconnecting */
1001    if (eventfd_write(vq->kick_fd, 1)) {
1002        return -1;
1003    }
1004
1005    return 0;
1006}
1007
1008static bool
1009vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
1010{
1011    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1012
1013    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1014
1015    if (!vu_check_queue_msg_file(dev, vmsg)) {
1016        return false;
1017    }
1018
1019    if (dev->vq[index].kick_fd != -1) {
1020        dev->remove_watch(dev, dev->vq[index].kick_fd);
1021        close(dev->vq[index].kick_fd);
1022        dev->vq[index].kick_fd = -1;
1023    }
1024
1025    dev->vq[index].kick_fd = vmsg->fds[0];
1026    DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);
1027
1028    dev->vq[index].started = true;
1029    if (dev->iface->queue_set_started) {
1030        dev->iface->queue_set_started(dev, index, true);
1031    }
1032
1033    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
1034        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
1035                       vu_kick_cb, (void *)(long)index);
1036
1037        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
1038               dev->vq[index].kick_fd, index);
1039    }
1040
1041    if (vu_check_queue_inflights(dev, &dev->vq[index])) {
1042        vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
1043    }
1044
1045    return false;
1046}
1047
1048void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
1049                          vu_queue_handler_cb handler)
1050{
1051    int qidx = vq - dev->vq;
1052
1053    vq->handler = handler;
1054    if (vq->kick_fd >= 0) {
1055        if (handler) {
1056            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
1057                           vu_kick_cb, (void *)(long)qidx);
1058        } else {
1059            dev->remove_watch(dev, vq->kick_fd);
1060        }
1061    }
1062}
1063
1064bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
1065                                int size, int offset)
1066{
1067    int qidx = vq - dev->vq;
1068    int fd_num = 0;
1069    VhostUserMsg vmsg = {
1070        .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
1071        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1072        .size = sizeof(vmsg.payload.area),
1073        .payload.area = {
1074            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
1075            .size = size,
1076            .offset = offset,
1077        },
1078    };
1079
1080    if (fd == -1) {
1081        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
1082    } else {
1083        vmsg.fds[fd_num++] = fd;
1084    }
1085
1086    vmsg.fd_num = fd_num;
1087
1088    if (!has_feature(dev->protocol_features,
1089                     VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
1090        return false;
1091    }
1092
1093    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
1094        return false;
1095    }
1096
1097    return vu_process_message_reply(dev, &vmsg);
1098}
1099
1100static bool
1101vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
1102{
1103    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1104
1105    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1106
1107    if (!vu_check_queue_msg_file(dev, vmsg)) {
1108        return false;
1109    }
1110
1111    if (dev->vq[index].call_fd != -1) {
1112        close(dev->vq[index].call_fd);
1113        dev->vq[index].call_fd = -1;
1114    }
1115
1116    dev->vq[index].call_fd = vmsg->fds[0];
1117
1118    /* in case of I/O hang after reconnecting */
1119    if (eventfd_write(vmsg->fds[0], 1)) {
1120        return -1;
1121    }
1122
1123    DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);
1124
1125    return false;
1126}
1127
1128static bool
1129vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
1130{
1131    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1132
1133    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1134
1135    if (!vu_check_queue_msg_file(dev, vmsg)) {
1136        return false;
1137    }
1138
1139    if (dev->vq[index].err_fd != -1) {
1140        close(dev->vq[index].err_fd);
1141        dev->vq[index].err_fd = -1;
1142    }
1143
1144    dev->vq[index].err_fd = vmsg->fds[0];
1145
1146    return false;
1147}
1148
1149static bool
1150vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1151{
1152    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
1153                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
1154                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
1155                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD;
1156
1157    if (have_userfault()) {
1158        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
1159    }
1160
1161    if (dev->iface->get_protocol_features) {
1162        features |= dev->iface->get_protocol_features(dev);
1163    }
1164
1165    vmsg->payload.u64 = features;
1166    vmsg->size = sizeof(vmsg->payload.u64);
1167    vmsg->fd_num = 0;
1168
1169    return true;
1170}
1171
1172static bool
1173vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1174{
1175    uint64_t features = vmsg->payload.u64;
1176
1177    DPRINT("u64: 0x%016"PRIx64"\n", features);
1178
1179    dev->protocol_features = vmsg->payload.u64;
1180
1181    if (dev->iface->set_protocol_features) {
1182        dev->iface->set_protocol_features(dev, features);
1183    }
1184
1185    return false;
1186}
1187
1188static bool
1189vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
1190{
1191    DPRINT("Function %s() not implemented yet.\n", __func__);
1192    return false;
1193}
1194
1195static bool
1196vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
1197{
1198    unsigned int index = vmsg->payload.state.index;
1199    unsigned int enable = vmsg->payload.state.num;
1200
1201    DPRINT("State.index: %d\n", index);
1202    DPRINT("State.enable:   %d\n", enable);
1203
1204    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
1205        vu_panic(dev, "Invalid vring_enable index: %u", index);
1206        return false;
1207    }
1208
1209    dev->vq[index].enable = enable;
1210    return false;
1211}
1212
1213static bool
1214vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
1215{
1216    if (vmsg->fd_num != 1) {
1217        vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
1218        return false;
1219    }
1220
1221    if (dev->slave_fd != -1) {
1222        close(dev->slave_fd);
1223    }
1224    dev->slave_fd = vmsg->fds[0];
1225    DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);
1226
1227    return false;
1228}
1229
1230static bool
1231vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
1232{
1233    int ret = -1;
1234
1235    if (dev->iface->get_config) {
1236        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
1237                                     vmsg->payload.config.size);
1238    }
1239
1240    if (ret) {
1241        /* resize to zero to indicate an error to master */
1242        vmsg->size = 0;
1243    }
1244
1245    return true;
1246}
1247
1248static bool
1249vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
1250{
1251    int ret = -1;
1252
1253    if (dev->iface->set_config) {
1254        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
1255                                     vmsg->payload.config.offset,
1256                                     vmsg->payload.config.size,
1257                                     vmsg->payload.config.flags);
1258        if (ret) {
1259            vu_panic(dev, "Set virtio configuration space failed");
1260        }
1261    }
1262
1263    return false;
1264}
1265
1266static bool
1267vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
1268{
1269    dev->postcopy_ufd = -1;
1270#ifdef UFFDIO_API
1271    struct uffdio_api api_struct;
1272
1273    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
1274    vmsg->size = 0;
1275#endif
1276
1277    if (dev->postcopy_ufd == -1) {
1278        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
1279        goto out;
1280    }
1281
1282#ifdef UFFDIO_API
1283    api_struct.api = UFFD_API;
1284    api_struct.features = 0;
1285    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
1286        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
1287        close(dev->postcopy_ufd);
1288        dev->postcopy_ufd = -1;
1289        goto out;
1290    }
1291    /* TODO: Stash feature flags somewhere */
1292#endif
1293
1294out:
1295    /* Return a ufd to the QEMU */
1296    vmsg->fd_num = 1;
1297    vmsg->fds[0] = dev->postcopy_ufd;
1298    return true; /* = send a reply */
1299}
1300
1301static bool
1302vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
1303{
1304    vmsg->payload.u64 = -1;
1305    vmsg->size = sizeof(vmsg->payload.u64);
1306
1307    if (dev->nregions) {
1308        vu_panic(dev, "Regions already registered at postcopy-listen");
1309        return true;
1310    }
1311    dev->postcopy_listening = true;
1312
1313    vmsg->flags = VHOST_USER_VERSION |  VHOST_USER_REPLY_MASK;
1314    vmsg->payload.u64 = 0; /* Success */
1315    return true;
1316}
1317
1318static bool
1319vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
1320{
1321    DPRINT("%s: Entry\n", __func__);
1322    dev->postcopy_listening = false;
1323    if (dev->postcopy_ufd > 0) {
1324        close(dev->postcopy_ufd);
1325        dev->postcopy_ufd = -1;
1326        DPRINT("%s: Done close\n", __func__);
1327    }
1328
1329    vmsg->fd_num = 0;
1330    vmsg->payload.u64 = 0;
1331    vmsg->size = sizeof(vmsg->payload.u64);
1332    vmsg->flags = VHOST_USER_VERSION |  VHOST_USER_REPLY_MASK;
1333    DPRINT("%s: exit\n", __func__);
1334    return true;
1335}
1336
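     /*
      * Bytes reserved in the shared inflight buffer for one queue of the
      * given size, rounded up to INFLIGHT_ALIGNMENT.
      */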
1337static inline uint64_t
1338vu_inflight_queue_size(uint16_t queue_size)
1339{
1340    return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
1341           sizeof(uint16_t), INFLIGHT_ALIGNMENT);
1342}
1343
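     /*
      * VHOST_USER_GET_INFLIGHT_FD: allocate a zeroed memfd big enough to
      * track inflight descriptors for every queue and return it to QEMU
      * together with its size and offset.
      */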
1344static bool
1345vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1346{
1347    int fd;
1348    void *addr;
1349    uint64_t mmap_size;
1350    uint16_t num_queues, queue_size;
1351
1352    if (vmsg->size != sizeof(vmsg->payload.inflight)) {
1353        vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
1354        vmsg->payload.inflight.mmap_size = 0;
1355        return true;
1356    }
1357
1358    num_queues = vmsg->payload.inflight.num_queues;
1359    queue_size = vmsg->payload.inflight.queue_size;
1360
 1361    DPRINT("get_inflight_fd num_queues: %"PRId16"\n", num_queues);
 1362    DPRINT("get_inflight_fd queue_size: %"PRId16"\n", queue_size);
1363
1364    mmap_size = vu_inflight_queue_size(queue_size) * num_queues;
1365
1366    addr = qemu_memfd_alloc("vhost-inflight", mmap_size,
1367                            F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1368                            &fd, NULL);
1369
1370    if (!addr) {
1371        vu_panic(dev, "Failed to alloc vhost inflight area");
1372        vmsg->payload.inflight.mmap_size = 0;
1373        return true;
1374    }
1375
1376    memset(addr, 0, mmap_size);
1377
1378    dev->inflight_info.addr = addr;
1379    dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
1380    dev->inflight_info.fd = vmsg->fds[0] = fd;
1381    vmsg->fd_num = 1;
1382    vmsg->payload.inflight.mmap_offset = 0;
1383
1384    DPRINT("send inflight mmap_size: %"PRId64"\n",
1385           vmsg->payload.inflight.mmap_size);
1386    DPRINT("send inflight mmap offset: %"PRId64"\n",
1387           vmsg->payload.inflight.mmap_offset);
1388
1389    return true;
1390}
1391
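     /*
      * VHOST_USER_SET_INFLIGHT_FD: map the inflight buffer handed back by
      * QEMU (for example after a reconnect) and give each queue its slice.
      */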
1392static bool
1393vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1394{
1395    int fd, i;
1396    uint64_t mmap_size, mmap_offset;
1397    uint16_t num_queues, queue_size;
1398    void *rc;
1399
1400    if (vmsg->fd_num != 1 ||
1401        vmsg->size != sizeof(vmsg->payload.inflight)) {
1402        vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
1403                 vmsg->size, vmsg->fd_num);
1404        return false;
1405    }
1406
1407    fd = vmsg->fds[0];
1408    mmap_size = vmsg->payload.inflight.mmap_size;
1409    mmap_offset = vmsg->payload.inflight.mmap_offset;
1410    num_queues = vmsg->payload.inflight.num_queues;
1411    queue_size = vmsg->payload.inflight.queue_size;
1412
1413    DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
1414    DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
1415    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
1416    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);
1417
1418    rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1419              fd, mmap_offset);
1420
1421    if (rc == MAP_FAILED) {
1422        vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
1423        return false;
1424    }
1425
1426    if (dev->inflight_info.fd) {
1427        close(dev->inflight_info.fd);
1428    }
1429
1430    if (dev->inflight_info.addr) {
1431        munmap(dev->inflight_info.addr, dev->inflight_info.size);
1432    }
1433
1434    dev->inflight_info.fd = fd;
1435    dev->inflight_info.addr = rc;
1436    dev->inflight_info.size = mmap_size;
1437
1438    for (i = 0; i < num_queues; i++) {
1439        dev->vq[i].inflight = (VuVirtqInflight *)rc;
1440        dev->vq[i].inflight->desc_num = queue_size;
1441        rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
1442    }
1443
1444    return false;
1445}
1446
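     /*
      * Dispatch one vhost-user request to its handler.  The return value
      * tells the caller whether a reply must be sent back to the master.
      */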
1447static bool
1448vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
1449{
1450    int do_reply = 0;
1451
1452    /* Print out generic part of the request. */
1453    DPRINT("================ Vhost user message ================\n");
1454    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
1455           vmsg->request);
1456    DPRINT("Flags:   0x%x\n", vmsg->flags);
1457    DPRINT("Size:    %d\n", vmsg->size);
1458
1459    if (vmsg->fd_num) {
1460        int i;
1461        DPRINT("Fds:");
1462        for (i = 0; i < vmsg->fd_num; i++) {
1463            DPRINT(" %d", vmsg->fds[i]);
1464        }
1465        DPRINT("\n");
1466    }
1467
1468    if (dev->iface->process_msg &&
1469        dev->iface->process_msg(dev, vmsg, &do_reply)) {
1470        return do_reply;
1471    }
1472
1473    switch (vmsg->request) {
1474    case VHOST_USER_GET_FEATURES:
1475        return vu_get_features_exec(dev, vmsg);
1476    case VHOST_USER_SET_FEATURES:
1477        return vu_set_features_exec(dev, vmsg);
1478    case VHOST_USER_GET_PROTOCOL_FEATURES:
1479        return vu_get_protocol_features_exec(dev, vmsg);
1480    case VHOST_USER_SET_PROTOCOL_FEATURES:
1481        return vu_set_protocol_features_exec(dev, vmsg);
1482    case VHOST_USER_SET_OWNER:
1483        return vu_set_owner_exec(dev, vmsg);
1484    case VHOST_USER_RESET_OWNER:
1485        return vu_reset_device_exec(dev, vmsg);
1486    case VHOST_USER_SET_MEM_TABLE:
1487        return vu_set_mem_table_exec(dev, vmsg);
1488    case VHOST_USER_SET_LOG_BASE:
1489        return vu_set_log_base_exec(dev, vmsg);
1490    case VHOST_USER_SET_LOG_FD:
1491        return vu_set_log_fd_exec(dev, vmsg);
1492    case VHOST_USER_SET_VRING_NUM:
1493        return vu_set_vring_num_exec(dev, vmsg);
1494    case VHOST_USER_SET_VRING_ADDR:
1495        return vu_set_vring_addr_exec(dev, vmsg);
1496    case VHOST_USER_SET_VRING_BASE:
1497        return vu_set_vring_base_exec(dev, vmsg);
1498    case VHOST_USER_GET_VRING_BASE:
1499        return vu_get_vring_base_exec(dev, vmsg);
1500    case VHOST_USER_SET_VRING_KICK:
1501        return vu_set_vring_kick_exec(dev, vmsg);
1502    case VHOST_USER_SET_VRING_CALL:
1503        return vu_set_vring_call_exec(dev, vmsg);
1504    case VHOST_USER_SET_VRING_ERR:
1505        return vu_set_vring_err_exec(dev, vmsg);
1506    case VHOST_USER_GET_QUEUE_NUM:
1507        return vu_get_queue_num_exec(dev, vmsg);
1508    case VHOST_USER_SET_VRING_ENABLE:
1509        return vu_set_vring_enable_exec(dev, vmsg);
1510    case VHOST_USER_SET_SLAVE_REQ_FD:
1511        return vu_set_slave_req_fd(dev, vmsg);
1512    case VHOST_USER_GET_CONFIG:
1513        return vu_get_config(dev, vmsg);
1514    case VHOST_USER_SET_CONFIG:
1515        return vu_set_config(dev, vmsg);
1516    case VHOST_USER_NONE:
1517        /* if you need processing before exit, override iface->process_msg */
1518        exit(0);
1519    case VHOST_USER_POSTCOPY_ADVISE:
1520        return vu_set_postcopy_advise(dev, vmsg);
1521    case VHOST_USER_POSTCOPY_LISTEN:
1522        return vu_set_postcopy_listen(dev, vmsg);
1523    case VHOST_USER_POSTCOPY_END:
1524        return vu_set_postcopy_end(dev, vmsg);
1525    case VHOST_USER_GET_INFLIGHT_FD:
1526        return vu_get_inflight_fd(dev, vmsg);
1527    case VHOST_USER_SET_INFLIGHT_FD:
1528        return vu_set_inflight_fd(dev, vmsg);
1529    default:
1530        vmsg_close_fds(vmsg);
1531        vu_panic(dev, "Unhandled request: %d", vmsg->request);
1532    }
1533
1534    return false;
1535}
1536
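     /*
      * Read and process a single message from the vhost-user socket,
      * sending a reply when the handler requests one.  Returns false if
      * reading the message or writing the reply fails.
      */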
1537bool
1538vu_dispatch(VuDev *dev)
1539{
1540    VhostUserMsg vmsg = { 0, };
1541    int reply_requested;
1542    bool success = false;
1543
1544    if (!vu_message_read(dev, dev->sock, &vmsg)) {
1545        goto end;
1546    }
1547
1548    reply_requested = vu_process_message(dev, &vmsg);
1549    if (!reply_requested) {
1550        success = true;
1551        goto end;
1552    }
1553
1554    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
1555        goto end;
1556    }
1557
1558    success = true;
1559
1560end:
1561    free(vmsg.data);
1562    return success;
1563}
1564
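     /*
      * Release everything owned by the device: region mappings, per-queue
      * eventfds and resubmit lists, the inflight buffer, the dirty log, the
      * slave channel and finally the socket itself.
      */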
1565void
1566vu_deinit(VuDev *dev)
1567{
1568    int i;
1569
1570    for (i = 0; i < dev->nregions; i++) {
1571        VuDevRegion *r = &dev->regions[i];
1572        void *m = (void *) (uintptr_t) r->mmap_addr;
1573        if (m != MAP_FAILED) {
1574            munmap(m, r->size + r->mmap_offset);
1575        }
1576    }
1577    dev->nregions = 0;
1578
1579    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
1580        VuVirtq *vq = &dev->vq[i];
1581
1582        if (vq->call_fd != -1) {
1583            close(vq->call_fd);
1584            vq->call_fd = -1;
1585        }
1586
1587        if (vq->kick_fd != -1) {
1588            close(vq->kick_fd);
1589            vq->kick_fd = -1;
1590        }
1591
1592        if (vq->err_fd != -1) {
1593            close(vq->err_fd);
1594            vq->err_fd = -1;
1595        }
1596
1597        if (vq->resubmit_list) {
1598            free(vq->resubmit_list);
1599            vq->resubmit_list = NULL;
1600        }
1601
1602        vq->inflight = NULL;
1603    }
1604
1605    if (dev->inflight_info.addr) {
1606        munmap(dev->inflight_info.addr, dev->inflight_info.size);
1607        dev->inflight_info.addr = NULL;
1608    }
1609
1610    if (dev->inflight_info.fd > 0) {
1611        close(dev->inflight_info.fd);
1612        dev->inflight_info.fd = -1;
1613    }
1614
1615    vu_close_log(dev);
1616    if (dev->slave_fd != -1) {
1617        close(dev->slave_fd);
1618        dev->slave_fd = -1;
1619    }
1620
1621    if (dev->sock != -1) {
1622        close(dev->sock);
1623    }
1624}
1625
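     /*
      * Initialise a VuDev for a connected vhost-user socket.  All callbacks
      * are mandatory; every queue starts with invalid eventfds and with
      * notifications enabled.
      */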
1626void
1627vu_init(VuDev *dev,
1628        int socket,
1629        vu_panic_cb panic,
1630        vu_set_watch_cb set_watch,
1631        vu_remove_watch_cb remove_watch,
1632        const VuDevIface *iface)
1633{
1634    int i;
1635
1636    assert(socket >= 0);
1637    assert(set_watch);
1638    assert(remove_watch);
1639    assert(iface);
1640    assert(panic);
1641
1642    memset(dev, 0, sizeof(*dev));
1643
1644    dev->sock = socket;
1645    dev->panic = panic;
1646    dev->set_watch = set_watch;
1647    dev->remove_watch = remove_watch;
1648    dev->iface = iface;
1649    dev->log_call_fd = -1;
1650    dev->slave_fd = -1;
1651    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
1652        dev->vq[i] = (VuVirtq) {
1653            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
1654            .notification = true,
1655        };
1656    }
1657}
1658
1659VuVirtq *
1660vu_get_queue(VuDev *dev, int qidx)
1661{
1662    assert(qidx < VHOST_MAX_NR_VIRTQUEUE);
1663    return &dev->vq[qidx];
1664}
1665
1666bool
1667vu_queue_enabled(VuDev *dev, VuVirtq *vq)
1668{
1669    return vq->enable;
1670}
1671
1672bool
1673vu_queue_started(const VuDev *dev, const VuVirtq *vq)
1674{
1675    return vq->started;
1676}
1677
1678static inline uint16_t
1679vring_avail_flags(VuVirtq *vq)
1680{
1681    return vq->vring.avail->flags;
1682}
1683
1684static inline uint16_t
1685vring_avail_idx(VuVirtq *vq)
1686{
1687    vq->shadow_avail_idx = vq->vring.avail->idx;
1688
1689    return vq->shadow_avail_idx;
1690}
1691
1692static inline uint16_t
1693vring_avail_ring(VuVirtq *vq, int i)
1694{
1695    return vq->vring.avail->ring[i];
1696}
1697
1698static inline uint16_t
1699vring_get_used_event(VuVirtq *vq)
1700{
1701    return vring_avail_ring(vq, vq->vring.num);
1702}
1703
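     /*
      * Number of descriptors the driver has made available past idx; panics
      * if the driver appears to have advanced the avail index by more than
      * the ring size.
      */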
1704static int
1705virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
1706{
1707    uint16_t num_heads = vring_avail_idx(vq) - idx;
1708
1709    /* Check it isn't doing very strange things with descriptor numbers. */
1710    if (num_heads > vq->vring.num) {
1711        vu_panic(dev, "Guest moved used index from %u to %u",
1712                 idx, vq->shadow_avail_idx);
1713        return -1;
1714    }
1715    if (num_heads) {
1716        /* On success, callers read a descriptor at vq->last_avail_idx.
1717         * Make sure descriptor read does not bypass avail index read. */
1718        smp_rmb();
1719    }
1720
1721    return num_heads;
1722}
1723
1724static bool
1725virtqueue_get_head(VuDev *dev, VuVirtq *vq,
1726                   unsigned int idx, unsigned int *head)
1727{
1728    /* Grab the next descriptor number they're advertising, and increment
1729     * the index we've seen. */
1730    *head = vring_avail_ring(vq, idx % vq->vring.num);
1731
1732    /* If their number is silly, that's a fatal mistake. */
1733    if (*head >= vq->vring.num) {
 1734        vu_panic(dev, "Guest says index %u is available", *head);
1735        return false;
1736    }
1737
1738    return true;
1739}
1740
1741static int
1742virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
1743                             uint64_t addr, size_t len)
1744{
1745    struct vring_desc *ori_desc;
1746    uint64_t read_len;
1747
1748    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
1749        return -1;
1750    }
1751
1752    if (len == 0) {
1753        return -1;
1754    }
1755
1756    while (len) {
1757        read_len = len;
1758        ori_desc = vu_gpa_to_va(dev, &read_len, addr);
1759        if (!ori_desc) {
1760            return -1;
1761        }
1762
1763        memcpy(desc, ori_desc, read_len);
1764        len -= read_len;
1765        addr += read_len;
 1766        desc += read_len / sizeof(struct vring_desc);
1767    }
1768
1769    return 0;
1770}
1771
1772enum {
1773    VIRTQUEUE_READ_DESC_ERROR = -1,
1774    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
1775    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
1776};
1777
1778static int
1779virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
1780                         int i, unsigned int max, unsigned int *next)
1781{
1782    /* If this descriptor says it doesn't chain, we're done. */
1783    if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
1784        return VIRTQUEUE_READ_DESC_DONE;
1785    }
1786
1787    /* Check they're not leading us off end of descriptors. */
1788    *next = desc[i].next;
1789    /* Make sure compiler knows to grab that: we don't want it changing! */
1790    smp_wmb();
1791
1792    if (*next >= max) {
 1793        vu_panic(dev, "Desc next is %u", *next);
1794        return VIRTQUEUE_READ_DESC_ERROR;
1795    }
1796
1797    return VIRTQUEUE_READ_DESC_MORE;
1798}
1799
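     /*
      * Walk the available descriptor chains (following indirect tables) and
      * add up how many device-readable (out) and device-writable (in) bytes
      * are queued, stopping early once both caps are reached.
      */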
1800void
1801vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
1802                         unsigned int *out_bytes,
1803                         unsigned max_in_bytes, unsigned max_out_bytes)
1804{
1805    unsigned int idx;
1806    unsigned int total_bufs, in_total, out_total;
1807    int rc;
1808
1809    idx = vq->last_avail_idx;
1810
1811    total_bufs = in_total = out_total = 0;
1812    if (unlikely(dev->broken) ||
1813        unlikely(!vq->vring.avail)) {
1814        goto done;
1815    }
1816
1817    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
1818        unsigned int max, desc_len, num_bufs, indirect = 0;
1819        uint64_t desc_addr, read_len;
1820        struct vring_desc *desc;
1821        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
1822        unsigned int i;
1823
1824        max = vq->vring.num;
1825        num_bufs = total_bufs;
1826        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
1827            goto err;
1828        }
1829        desc = vq->vring.desc;
1830
1831        if (desc[i].flags & VRING_DESC_F_INDIRECT) {
1832            if (desc[i].len % sizeof(struct vring_desc)) {
1833                vu_panic(dev, "Invalid size for indirect buffer table");
1834                goto err;
1835            }
1836
1837            /* If we've got too many, that implies a descriptor loop. */
1838            if (num_bufs >= max) {
1839                vu_panic(dev, "Looped descriptor");
1840                goto err;
1841            }
1842
1843            /* loop over the indirect descriptor table */
1844            indirect = 1;
1845            desc_addr = desc[i].addr;
1846            desc_len = desc[i].len;
1847            max = desc_len / sizeof(struct vring_desc);
1848            read_len = desc_len;
1849            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
1850            if (unlikely(desc && read_len != desc_len)) {
1851                /* Failed to use zero copy */
1852                desc = NULL;
1853                if (!virtqueue_read_indirect_desc(dev, desc_buf,
1854                                                  desc_addr,
1855                                                  desc_len)) {
1856                    desc = desc_buf;
1857                }
1858            }
1859            if (!desc) {
1860                vu_panic(dev, "Invalid indirect buffer table");
1861                goto err;
1862            }
1863            num_bufs = i = 0;
1864        }
1865
1866        do {
1867            /* If we've got too many, that implies a descriptor loop. */
1868            if (++num_bufs > max) {
1869                vu_panic(dev, "Looped descriptor");
1870                goto err;
1871            }
1872
1873            if (desc[i].flags & VRING_DESC_F_WRITE) {
1874                in_total += desc[i].len;
1875            } else {
1876                out_total += desc[i].len;
1877            }
1878            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1879                goto done;
1880            }
1881            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
1882        } while (rc == VIRTQUEUE_READ_DESC_MORE);
1883
1884        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1885            goto err;
1886        }
1887
1888        if (!indirect) {
1889            total_bufs = num_bufs;
1890        } else {
1891            total_bufs++;
1892        }
1893    }
1894    if (rc < 0) {
1895        goto err;
1896    }
1897done:
1898    if (in_bytes) {
1899        *in_bytes = in_total;
1900    }
1901    if (out_bytes) {
1902        *out_bytes = out_total;
1903    }
1904    return;
1905
1906err:
1907    in_total = out_total = 0;
1908    goto done;
1909}
1910
1911bool
1912vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
1913                     unsigned int out_bytes)
1914{
1915    unsigned int in_total, out_total;
1916
1917    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
1918                             in_bytes, out_bytes);
1919
1920    return in_bytes <= in_total && out_bytes <= out_total;
1921}
1922
1923/* Fetch avail_idx from VQ memory only when we really need to know if
1924 * guest has added some buffers. */
1925bool
1926vu_queue_empty(VuDev *dev, VuVirtq *vq)
1927{
1928    if (unlikely(dev->broken) ||
1929        unlikely(!vq->vring.avail)) {
1930        return true;
1931    }
1932
1933    if (vq->shadow_avail_idx != vq->last_avail_idx) {
1934        return false;
1935    }
1936
1937    return vring_avail_idx(vq) == vq->last_avail_idx;
1938}
1939
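    /* Decide whether the guest needs an interrupt for the used ring update:
     * honour VIRTIO_F_NOTIFY_ON_EMPTY, VRING_AVAIL_F_NO_INTERRUPT and, when
     * VIRTIO_RING_F_EVENT_IDX is negotiated, the used_event index. */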
1940static bool
1941vring_notify(VuDev *dev, VuVirtq *vq)
1942{
1943    uint16_t old, new;
1944    bool v;
1945
1946    /* We need to expose used array entries before checking used event. */
1947    smp_mb();
1948
1949    /* Always notify when queue is empty (if the feature was acknowledged) */
1950    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1951        !vq->inuse && vu_queue_empty(dev, vq)) {
1952        return true;
1953    }
1954
1955    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1956        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
1957    }
1958
1959    v = vq->signalled_used_valid;
1960    vq->signalled_used_valid = true;
1961    old = vq->signalled_used;
1962    new = vq->signalled_used = vq->used_idx;
1963    return !v || vring_need_event(vring_get_used_event(vq), new, old);
1964}
1965
1966void
1967vu_queue_notify(VuDev *dev, VuVirtq *vq)
1968{
1969    if (unlikely(dev->broken) ||
1970        unlikely(!vq->vring.avail)) {
1971        return;
1972    }
1973
1974    if (!vring_notify(dev, vq)) {
1975        DPRINT("skipped notify...\n");
1976        return;
1977    }
1978
1979    if (eventfd_write(vq->call_fd, 1) < 0) {
1980        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
1981    }
1982}
1983
1984static inline void
1985vring_used_flags_set_bit(VuVirtq *vq, int mask)
1986{
1987    uint16_t *flags;
1988
1989    flags = (uint16_t *)((char *)vq->vring.used +
1990                         offsetof(struct vring_used, flags));
1991    *flags |= mask;
1992}
1993
1994static inline void
1995vring_used_flags_unset_bit(VuVirtq *vq, int mask)
1996{
1997    uint16_t *flags;
1998
1999    flags = (uint16_t *)((char *)vq->vring.used +
2000                         offsetof(struct vring_used, flags));
2001    *flags &= ~mask;
2002}
2003
2004static inline void
2005vring_set_avail_event(VuVirtq *vq, uint16_t val)
2006{
2007    if (!vq->notification) {
2008        return;
2009    }
2010
2011    *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val;
2012}
2013
2014void
2015vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
2016{
2017    vq->notification = enable;
2018    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2019        vring_set_avail_event(vq, vring_avail_idx(vq));
2020    } else if (enable) {
2021        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
2022    } else {
2023        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
2024    }
2025    if (enable) {
2026        /* Expose avail event/used flags before caller checks the avail idx. */
2027        smp_mb();
2028    }
2029}
2030
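    /* Translate one descriptor's guest-physical buffer into iovec entries,
     * splitting it across memory regions as needed. */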
2031static void
2032virtqueue_map_desc(VuDev *dev,
2033                   unsigned int *p_num_sg, struct iovec *iov,
2034                   unsigned int max_num_sg, bool is_write,
2035                   uint64_t pa, size_t sz)
2036{
2037    unsigned num_sg = *p_num_sg;
2038
2039    assert(num_sg <= max_num_sg);
2040
2041    if (!sz) {
2042        vu_panic(dev, "virtio: zero sized buffers are not allowed");
2043        return;
2044    }
2045
2046    while (sz) {
2047        uint64_t len = sz;
2048
2049        if (num_sg == max_num_sg) {
2050            vu_panic(dev, "virtio: too many descriptors in indirect table");
2051            return;
2052        }
2053
2054        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
2055        if (iov[num_sg].iov_base == NULL) {
2056            vu_panic(dev, "virtio: invalid address for buffers");
2057            return;
2058        }
2059        iov[num_sg].iov_len = len;
2060        num_sg++;
2061        sz -= len;
2062        pa += len;
2063    }
2064
2065    *p_num_sg = num_sg;
2066}
2067
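    /* Allocate a VuVirtqElement and its in/out scatter-gather arrays as a
     * single block; 'sz' is the caller's element size and must be at least
     * sizeof(VuVirtqElement). */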
2068static void *
2069virtqueue_alloc_element(size_t sz,
2070                        unsigned out_num, unsigned in_num)
2071{
2072    VuVirtqElement *elem;
2073    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
2074    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
2075    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
2076
2077    assert(sz >= sizeof(VuVirtqElement));
2078    elem = malloc(out_sg_end);
        if (!elem) {
            DPRINT("%s: failed to malloc virtqueue element\n", __func__);
            return NULL;
        }
2079    elem->out_num = out_num;
2080    elem->in_num = in_num;
2081    elem->in_sg = (void *)elem + in_sg_ofs;
2082    elem->out_sg = (void *)elem + out_sg_ofs;
2083    return elem;
2084}
2085
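    /* Walk the descriptor chain starting at 'idx', following an indirect
     * table if present, and build a VuVirtqElement describing its readable
     * and writable buffers.  Returns NULL on error. */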
2086static void *
2087vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
2088{
2089    struct vring_desc *desc = vq->vring.desc;
2090    uint64_t desc_addr, read_len;
2091    unsigned int desc_len;
2092    unsigned int max = vq->vring.num;
2093    unsigned int i = idx;
2094    VuVirtqElement *elem;
2095    unsigned int out_num = 0, in_num = 0;
2096    struct iovec iov[VIRTQUEUE_MAX_SIZE];
2097    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2098    int rc;
2099
2100    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
2101        if (desc[i].len % sizeof(struct vring_desc)) {
2102            vu_panic(dev, "Invalid size for indirect buffer table");
                return NULL;
2103        }
2104
2105        /* loop over the indirect descriptor table */
2106        desc_addr = desc[i].addr;
2107        desc_len = desc[i].len;
2108        max = desc_len / sizeof(struct vring_desc);
2109        read_len = desc_len;
2110        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2111        if (unlikely(desc && read_len != desc_len)) {
2112            /* Failed to use zero copy */
2113            desc = NULL;
2114            if (!virtqueue_read_indirect_desc(dev, desc_buf,
2115                                              desc_addr,
2116                                              desc_len)) {
2117                desc = desc_buf;
2118            }
2119        }
2120        if (!desc) {
2121            vu_panic(dev, "Invalid indirect buffer table");
2122            return NULL;
2123        }
2124        i = 0;
2125    }
2126
2127    /* Collect all the descriptors */
2128    do {
2129        if (desc[i].flags & VRING_DESC_F_WRITE) {
2130            virtqueue_map_desc(dev, &in_num, iov + out_num,
2131                               VIRTQUEUE_MAX_SIZE - out_num, true,
2132                               desc[i].addr, desc[i].len);
2133        } else {
2134            if (in_num) {
2135                vu_panic(dev, "Incorrect order for descriptors");
2136                return NULL;
2137            }
2138            virtqueue_map_desc(dev, &out_num, iov,
2139                               VIRTQUEUE_MAX_SIZE, false,
2140                               desc[i].addr, desc[i].len);
2141        }
2142
2143        /* If we've got too many, that implies a descriptor loop. */
2144        if ((in_num + out_num) > max) {
2145            vu_panic(dev, "Looped descriptor");
                return NULL;
2146        }
2147        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
2148    } while (rc == VIRTQUEUE_READ_DESC_MORE);
2149
2150    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
2151        vu_panic(dev, "read descriptor error");
2152        return NULL;
2153    }
2154
2155    /* Now copy what we have collected and mapped */
2156    elem = virtqueue_alloc_element(sz, out_num, in_num);
        if (!elem) {
            return NULL;
        }
2157    elem->index = idx;
2158    for (i = 0; i < out_num; i++) {
2159        elem->out_sg[i] = iov[i];
2160    }
2161    for (i = 0; i < in_num; i++) {
2162        elem->in_sg[i] = iov[out_num + i];
2163    }
2164
2165    return elem;
2166}
2167
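    /* Record a descriptor as inflight in the shared inflight buffer
     * (VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD) so it can be resubmitted
     * after a reconnect. */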
2168static int
2169vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
2170{
2171    if (!has_feature(dev->protocol_features,
2172        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2173        return 0;
2174    }
2175
2176    if (unlikely(!vq->inflight)) {
2177        return -1;
2178    }
2179
2180    vq->inflight->desc[desc_idx].counter = vq->counter++;
2181    vq->inflight->desc[desc_idx].inflight = 1;
2182
2183    return 0;
2184}
2185
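    /* Remember the head of the batch about to be completed, so that a crash
     * between the used ring update and the inflight update can be resolved
     * on reconnect. */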
2186static int
2187vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2188{
2189    if (!has_feature(dev->protocol_features,
2190        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2191        return 0;
2192    }
2193
2194    if (unlikely(!vq->inflight)) {
2195        return -1;
2196    }
2197
2198    vq->inflight->last_batch_head = desc_idx;
2199
2200    return 0;
2201}
2202
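    /* Clear the descriptor's inflight state and publish the new used index
     * once the used ring update is visible. */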
2203static int
2204vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2205{
2206    if (!has_feature(dev->protocol_features,
2207        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2208        return 0;
2209    }
2210
2211    if (unlikely(!vq->inflight)) {
2212        return -1;
2213    }
2214
2215    barrier();
2216
2217    vq->inflight->desc[desc_idx].inflight = 0;
2218
2219    barrier();
2220
2221    vq->inflight->used_idx = vq->used_idx;
2222
2223    return 0;
2224}
2225
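    /* Pop the next available element, resubmitting any pending inflight
     * descriptors first.  The returned element is malloc()ed and must be
     * freed by the caller; returns NULL if the queue is empty or broken. */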
2226void *
2227vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
2228{
2229    int i;
2230    unsigned int head;
2231    VuVirtqElement *elem;
2232
2233    if (unlikely(dev->broken) ||
2234        unlikely(!vq->vring.avail)) {
2235        return NULL;
2236    }
2237
2238    if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
2239        i = (--vq->resubmit_num);
2240        elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);
2241
2242        if (!vq->resubmit_num) {
2243            free(vq->resubmit_list);
2244            vq->resubmit_list = NULL;
2245        }
2246
2247        return elem;
2248    }
2249
2250    if (vu_queue_empty(dev, vq)) {
2251        return NULL;
2252    }
2253    /*
2254     * Needed after vu_queue_empty(), see comment in
2255     * virtqueue_num_heads().
2256     */
2257    smp_rmb();
2258
2259    if (vq->inuse >= vq->vring.num) {
2260        vu_panic(dev, "Virtqueue size exceeded");
2261        return NULL;
2262    }
2263
2264    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
2265        return NULL;
2266    }
2267
2268    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2269        vring_set_avail_event(vq, vq->last_avail_idx);
2270    }
2271
2272    elem = vu_queue_map_desc(dev, vq, head, sz);
2273
2274    if (!elem) {
2275        return NULL;
2276    }
2277
2278    vq->inuse++;
2279
2280    vu_queue_inflight_get(dev, vq, head);
2281
2282    return elem;
2283}
2284
2285static void
2286vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2287                        size_t len)
2288{
2289    vq->inuse--;
2290    /* unmap, when DMA support is added */
2291}
2292
2293void
2294vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2295               size_t len)
2296{
2297    vq->last_avail_idx--;
2298    vu_queue_detach_element(dev, vq, elem, len);
2299}
2300
2301bool
2302vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
2303{
2304    if (num > vq->inuse) {
2305        return false;
2306    }
2307    vq->last_avail_idx -= num;
2308    vq->inuse -= num;
2309    return true;
2310}
2311
2312static inline
2313void vring_used_write(VuDev *dev, VuVirtq *vq,
2314                      struct vring_used_elem *uelem, int i)
2315{
2316    struct vring_used *used = vq->vring.used;
2317
2318    used->ring[i] = *uelem;
2319    vu_log_write(dev, vq->vring.log_guest_addr +
2320                 offsetof(struct vring_used, ring[i]),
2321                 sizeof(used->ring[i]));
2322}
2323
2324
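    /* Mark the guest memory written by this element (up to 'len' bytes) as
     * dirty in the log. */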
2325static void
2326vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
2327                  const VuVirtqElement *elem,
2328                  unsigned int len)
2329{
2330    struct vring_desc *desc = vq->vring.desc;
2331    unsigned int i, max, min, desc_len;
2332    uint64_t desc_addr, read_len;
2333    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2334    unsigned num_bufs = 0;
2335
2336    max = vq->vring.num;
2337    i = elem->index;
2338
2339    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
2340        if (desc[i].len % sizeof(struct vring_desc)) {
2341            vu_panic(dev, "Invalid size for indirect buffer table");
                return;
2342        }
2343
2344        /* loop over the indirect descriptor table */
2345        desc_addr = desc[i].addr;
2346        desc_len = desc[i].len;
2347        max = desc_len / sizeof(struct vring_desc);
2348        read_len = desc_len;
2349        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2350        if (unlikely(desc && read_len != desc_len)) {
2351            /* Failed to use zero copy */
2352            desc = NULL;
2353            if (!virtqueue_read_indirect_desc(dev, desc_buf,
2354                                              desc_addr,
2355                                              desc_len)) {
2356                desc = desc_buf;
2357            }
2358        }
2359        if (!desc) {
2360            vu_panic(dev, "Invalid indirect buffer table");
2361            return;
2362        }
2363        i = 0;
2364    }
2365
2366    do {
2367        if (++num_bufs > max) {
2368            vu_panic(dev, "Looped descriptor");
2369            return;
2370        }
2371
2372        if (desc[i].flags & VRING_DESC_F_WRITE) {
2373            min = MIN(desc[i].len, len);
2374            vu_log_write(dev, desc[i].addr, min);
2375            len -= min;
2376        }
2377
2378    } while (len > 0 &&
2379             (virtqueue_read_next_desc(dev, desc, i, max, &i)
2380              == VIRTQUEUE_READ_DESC_MORE));
2381}
2382
2383void
2384vu_queue_fill(VuDev *dev, VuVirtq *vq,
2385              const VuVirtqElement *elem,
2386              unsigned int len, unsigned int idx)
2387{
2388    struct vring_used_elem uelem;
2389
2390    if (unlikely(dev->broken) ||
2391        unlikely(!vq->vring.avail)) {
2392        return;
2393    }
2394
2395    vu_log_queue_fill(dev, vq, elem, len);
2396
2397    idx = (idx + vq->used_idx) % vq->vring.num;
2398
2399    uelem.id = elem->index;
2400    uelem.len = len;
2401    vring_used_write(dev, vq, &uelem, idx);
2402}
2403
2404static inline
2405void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
2406{
2407    vq->vring.used->idx = val;
2408    vu_log_write(dev,
2409                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
2410                 sizeof(vq->vring.used->idx));
2411
2412    vq->used_idx = val;
2413}
2414
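    /* Publish 'count' previously filled used entries by advancing the used
     * index, and update the event-idx bookkeeping. */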
2415void
2416vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
2417{
2418    uint16_t old, new;
2419
2420    if (unlikely(dev->broken) ||
2421        unlikely(!vq->vring.avail)) {
2422        return;
2423    }
2424
2425    /* Make sure buffer is written before we update index. */
2426    smp_wmb();
2427
2428    old = vq->used_idx;
2429    new = old + count;
2430    vring_used_idx_set(dev, vq, new);
2431    vq->inuse -= count;
2432    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
2433        vq->signalled_used_valid = false;
2434    }
2435}
2436
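    /* Complete a single element: fill the used ring, update inflight state
     * and flush the used index so the guest sees the update. */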
2437void
2438vu_queue_push(VuDev *dev, VuVirtq *vq,
2439              const VuVirtqElement *elem, unsigned int len)
2440{
2441    vu_queue_fill(dev, vq, elem, len, 0);
2442    vu_queue_inflight_pre_put(dev, vq, elem->index);
2443    vu_queue_flush(dev, vq, 1);
2444    vu_queue_inflight_post_put(dev, vq, elem->index);
2445}
2446