qemu/contrib/libvhost-user/libvhost-user.c
   1/*
   2 * Vhost User library
   3 *
   4 * Copyright IBM, Corp. 2007
   5 * Copyright (c) 2016 Red Hat, Inc.
   6 *
   7 * Authors:
   8 *  Anthony Liguori <aliguori@us.ibm.com>
   9 *  Marc-André Lureau <mlureau@redhat.com>
  10 *  Victor Kaplansky <victork@redhat.com>
  11 *
  12 * This work is licensed under the terms of the GNU GPL, version 2 or
  13 * later.  See the COPYING file in the top-level directory.
  14 */
  15
  16/* this code avoids GLib dependency */
  17#include <stdlib.h>
  18#include <stdio.h>
  19#include <unistd.h>
  20#include <stdarg.h>
  21#include <errno.h>
  22#include <string.h>
  23#include <assert.h>
  24#include <inttypes.h>
  25#include <sys/types.h>
  26#include <sys/socket.h>
  27#include <sys/eventfd.h>
  28#include <sys/mman.h>
  29#include "qemu/compiler.h"
  30
  31#if defined(__linux__)
  32#include <sys/syscall.h>
  33#include <fcntl.h>
  34#include <sys/ioctl.h>
  35#include <linux/vhost.h>
  36
  37#ifdef __NR_userfaultfd
  38#include <linux/userfaultfd.h>
  39#endif
  40
  41#endif
  42
  43#include "qemu/atomic.h"
  44#include "qemu/osdep.h"
  45#include "qemu/memfd.h"
  46
  47#include "libvhost-user.h"
  48
  49/* usually provided by GLib */
  50#ifndef MIN
  51#define MIN(x, y) ({                            \
  52            typeof(x) _min1 = (x);              \
  53            typeof(y) _min2 = (y);              \
  54            (void) (&_min1 == &_min2);          \
  55            _min1 < _min2 ? _min1 : _min2; })
  56#endif
  57
  58/* Round number down to multiple */
  59#define ALIGN_DOWN(n, m) ((n) / (m) * (m))
  60
  61/* Round number up to multiple */
  62#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
  63
  64/* Align each region to cache line size in inflight buffer */
  65#define INFLIGHT_ALIGNMENT 64
  66
  67/* The version of inflight buffer */
  68#define INFLIGHT_VERSION 1
  69
  70#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
  71
  72/* The version of the protocol we support */
  73#define VHOST_USER_VERSION 1
  74#define LIBVHOST_USER_DEBUG 0
  75
  76#define DPRINT(...)                             \
  77    do {                                        \
  78        if (LIBVHOST_USER_DEBUG) {              \
  79            fprintf(stderr, __VA_ARGS__);        \
  80        }                                       \
  81    } while (0)
  82
  83static inline
  84bool has_feature(uint64_t features, unsigned int fbit)
  85{
  86    assert(fbit < 64);
  87    return !!(features & (1ULL << fbit));
  88}
  89
  90static inline
  91bool vu_has_feature(VuDev *dev,
  92                    unsigned int fbit)
  93{
  94    return has_feature(dev->features, fbit);
  95}
  96
  97static const char *
  98vu_request_to_string(unsigned int req)
  99{
 100#define REQ(req) [req] = #req
 101    static const char *vu_request_str[] = {
 102        REQ(VHOST_USER_NONE),
 103        REQ(VHOST_USER_GET_FEATURES),
 104        REQ(VHOST_USER_SET_FEATURES),
 105        REQ(VHOST_USER_SET_OWNER),
 106        REQ(VHOST_USER_RESET_OWNER),
 107        REQ(VHOST_USER_SET_MEM_TABLE),
 108        REQ(VHOST_USER_SET_LOG_BASE),
 109        REQ(VHOST_USER_SET_LOG_FD),
 110        REQ(VHOST_USER_SET_VRING_NUM),
 111        REQ(VHOST_USER_SET_VRING_ADDR),
 112        REQ(VHOST_USER_SET_VRING_BASE),
 113        REQ(VHOST_USER_GET_VRING_BASE),
 114        REQ(VHOST_USER_SET_VRING_KICK),
 115        REQ(VHOST_USER_SET_VRING_CALL),
 116        REQ(VHOST_USER_SET_VRING_ERR),
 117        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
 118        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
 119        REQ(VHOST_USER_GET_QUEUE_NUM),
 120        REQ(VHOST_USER_SET_VRING_ENABLE),
 121        REQ(VHOST_USER_SEND_RARP),
 122        REQ(VHOST_USER_NET_SET_MTU),
 123        REQ(VHOST_USER_SET_SLAVE_REQ_FD),
 124        REQ(VHOST_USER_IOTLB_MSG),
 125        REQ(VHOST_USER_SET_VRING_ENDIAN),
 126        REQ(VHOST_USER_GET_CONFIG),
 127        REQ(VHOST_USER_SET_CONFIG),
 128        REQ(VHOST_USER_POSTCOPY_ADVISE),
 129        REQ(VHOST_USER_POSTCOPY_LISTEN),
 130        REQ(VHOST_USER_POSTCOPY_END),
 131        REQ(VHOST_USER_GET_INFLIGHT_FD),
 132        REQ(VHOST_USER_SET_INFLIGHT_FD),
 133        REQ(VHOST_USER_MAX),
 134    };
 135#undef REQ
 136
 137    if (req < VHOST_USER_MAX) {
 138        return vu_request_str[req];
 139    } else {
 140        return "unknown";
 141    }
 142}
 143
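     /*
      * Format the error message, mark the device as broken and hand the
      * message to the panic callback supplied to vu_init().
      */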
 144static void
 145vu_panic(VuDev *dev, const char *msg, ...)
 146{
 147    char *buf = NULL;
 148    va_list ap;
 149
 150    va_start(ap, msg);
 151    if (vasprintf(&buf, msg, ap) < 0) {
 152        buf = NULL;
 153    }
 154    va_end(ap);
 155
 156    dev->broken = true;
 157    dev->panic(dev, buf);
 158    free(buf);
 159
 160    /* FIXME: find a way to call virtio_error? */
 161}
 162
 163/* Translate guest physical address to our virtual address.  */
 164void *
 165vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
 166{
 167    int i;
 168
 169    if (*plen == 0) {
 170        return NULL;
 171    }
 172
 173    /* Find matching memory region.  */
 174    for (i = 0; i < dev->nregions; i++) {
 175        VuDevRegion *r = &dev->regions[i];
 176
 177        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
 178            if ((guest_addr + *plen) > (r->gpa + r->size)) {
 179                *plen = r->gpa + r->size - guest_addr;
 180            }
 181            return (void *)(uintptr_t)
 182                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
 183        }
 184    }
 185
 186    return NULL;
 187}
 188
 189/* Translate qemu virtual address to our virtual address.  */
 190static void *
 191qva_to_va(VuDev *dev, uint64_t qemu_addr)
 192{
 193    int i;
 194
 195    /* Find matching memory region.  */
 196    for (i = 0; i < dev->nregions; i++) {
 197        VuDevRegion *r = &dev->regions[i];
 198
 199        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
 200            return (void *)(uintptr_t)
 201                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
 202        }
 203    }
 204
 205    return NULL;
 206}
 207
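     /* Close every file descriptor that was passed with a message. */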
 208static void
 209vmsg_close_fds(VhostUserMsg *vmsg)
 210{
 211    int i;
 212
 213    for (i = 0; i < vmsg->fd_num; i++) {
 214        close(vmsg->fds[i]);
 215    }
 216}
 217
 218/* A test to see if we have userfault available */
 219static bool
 220have_userfault(void)
 221{
 222#if defined(__linux__) && defined(__NR_userfaultfd) &&\
 223        defined(UFFD_FEATURE_MISSING_SHMEM) &&\
 224        defined(UFFD_FEATURE_MISSING_HUGETLBFS)
  225    /* Now test that the kernel we're running on really has the features */
 226    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 227    struct uffdio_api api_struct;
 228    if (ufd < 0) {
 229        return false;
 230    }
 231
 232    api_struct.api = UFFD_API;
 233    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
 234                          UFFD_FEATURE_MISSING_HUGETLBFS;
 235    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
 236        close(ufd);
 237        return false;
 238    }
 239    close(ufd);
 240    return true;
 241
 242#else
 243    return false;
 244#endif
 245}
 246
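     /*
      * Read one vhost-user message from conn_fd: the fixed-size header, any
      * file descriptors passed as SCM_RIGHTS ancillary data, and finally the
      * payload.  Returns false on error; received fds are closed if the
      * payload cannot be read.
      */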
 247static bool
 248vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 249{
 250    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
 251    struct iovec iov = {
 252        .iov_base = (char *)vmsg,
 253        .iov_len = VHOST_USER_HDR_SIZE,
 254    };
 255    struct msghdr msg = {
 256        .msg_iov = &iov,
 257        .msg_iovlen = 1,
 258        .msg_control = control,
 259        .msg_controllen = sizeof(control),
 260    };
 261    size_t fd_size;
 262    struct cmsghdr *cmsg;
 263    int rc;
 264
 265    do {
 266        rc = recvmsg(conn_fd, &msg, 0);
 267    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 268
 269    if (rc < 0) {
 270        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
 271        return false;
 272    }
 273
 274    vmsg->fd_num = 0;
 275    for (cmsg = CMSG_FIRSTHDR(&msg);
 276         cmsg != NULL;
 277         cmsg = CMSG_NXTHDR(&msg, cmsg))
 278    {
 279        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
 280            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
 281            vmsg->fd_num = fd_size / sizeof(int);
 282            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
 283            break;
 284        }
 285    }
 286
 287    if (vmsg->size > sizeof(vmsg->payload)) {
 288        vu_panic(dev,
 289                 "Error: too big message request: %d, size: vmsg->size: %u, "
 290                 "while sizeof(vmsg->payload) = %zu\n",
 291                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
 292        goto fail;
 293    }
 294
 295    if (vmsg->size) {
 296        do {
 297            rc = read(conn_fd, &vmsg->payload, vmsg->size);
 298        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 299
 300        if (rc <= 0) {
 301            vu_panic(dev, "Error while reading: %s", strerror(errno));
 302            goto fail;
 303        }
 304
 305        assert(rc == vmsg->size);
 306    }
 307
 308    return true;
 309
 310fail:
 311    vmsg_close_fds(vmsg);
 312
 313    return false;
 314}
 315
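     /*
      * Send a vhost-user message on conn_fd: the header plus any fds as
      * SCM_RIGHTS ancillary data, then the payload (from vmsg->data if set,
      * otherwise from the in-place payload union).
      */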
 316static bool
 317vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 318{
 319    int rc;
 320    uint8_t *p = (uint8_t *)vmsg;
 321    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
 322    struct iovec iov = {
 323        .iov_base = (char *)vmsg,
 324        .iov_len = VHOST_USER_HDR_SIZE,
 325    };
 326    struct msghdr msg = {
 327        .msg_iov = &iov,
 328        .msg_iovlen = 1,
 329        .msg_control = control,
 330    };
 331    struct cmsghdr *cmsg;
 332
 333    memset(control, 0, sizeof(control));
 334    assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS);
 335    if (vmsg->fd_num > 0) {
 336        size_t fdsize = vmsg->fd_num * sizeof(int);
 337        msg.msg_controllen = CMSG_SPACE(fdsize);
 338        cmsg = CMSG_FIRSTHDR(&msg);
 339        cmsg->cmsg_len = CMSG_LEN(fdsize);
 340        cmsg->cmsg_level = SOL_SOCKET;
 341        cmsg->cmsg_type = SCM_RIGHTS;
 342        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
 343    } else {
 344        msg.msg_controllen = 0;
 345    }
 346
 347    do {
 348        rc = sendmsg(conn_fd, &msg, 0);
 349    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 350
 351    if (vmsg->size) {
 352        do {
 353            if (vmsg->data) {
 354                rc = write(conn_fd, vmsg->data, vmsg->size);
 355            } else {
 356                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
 357            }
 358        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 359    }
 360
 361    if (rc <= 0) {
 362        vu_panic(dev, "Error while writing: %s", strerror(errno));
 363        return false;
 364    }
 365
 366    return true;
 367}
 368
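     /* Stamp the reply/version flags on vmsg and send it back to the master. */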
 369static bool
 370vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 371{
 372    /* Set the version in the flags when sending the reply */
 373    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
 374    vmsg->flags |= VHOST_USER_VERSION;
 375    vmsg->flags |= VHOST_USER_REPLY_MASK;
 376
 377    return vu_message_write(dev, conn_fd, vmsg);
 378}
 379
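     /*
      * If vmsg asked for a reply, read it from the slave channel and report
      * success only when the reply carries a zero status payload.
      */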
 380static bool
 381vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
 382{
 383    VhostUserMsg msg_reply;
 384
 385    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
 386        return true;
 387    }
 388
 389    if (!vu_message_read(dev, dev->slave_fd, &msg_reply)) {
 390        return false;
 391    }
 392
 393    if (msg_reply.request != vmsg->request) {
 394        DPRINT("Received unexpected msg type. Expected %d received %d",
 395               vmsg->request, msg_reply.request);
 396        return false;
 397    }
 398
 399    return msg_reply.payload.u64 == 0;
 400}
 401
 402/* Kick the log_call_fd if required. */
 403static void
 404vu_log_kick(VuDev *dev)
 405{
 406    if (dev->log_call_fd != -1) {
 407        DPRINT("Kicking the QEMU's log...\n");
 408        if (eventfd_write(dev->log_call_fd, 1) < 0) {
 409            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
 410        }
 411    }
 412}
 413
 414static void
 415vu_log_page(uint8_t *log_table, uint64_t page)
 416{
 417    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
 418    atomic_or(&log_table[page / 8], 1 << (page % 8));
 419}
 420
 421static void
 422vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
 423{
 424    uint64_t page;
 425
 426    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
 427        !dev->log_table || !length) {
 428        return;
 429    }
 430
 431    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));
 432
 433    page = address / VHOST_LOG_PAGE;
 434    while (page * VHOST_LOG_PAGE < address + length) {
 435        vu_log_page(dev->log_table, page);
  436        page += 1;
 437    }
 438
 439    vu_log_kick(dev);
 440}
 441
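     /*
      * Watch callback for a queue's kick fd: drain the eventfd and invoke
      * the queue handler, if any.
      */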
 442static void
 443vu_kick_cb(VuDev *dev, int condition, void *data)
 444{
 445    int index = (intptr_t)data;
 446    VuVirtq *vq = &dev->vq[index];
 447    int sock = vq->kick_fd;
 448    eventfd_t kick_data;
 449    ssize_t rc;
 450
 451    rc = eventfd_read(sock, &kick_data);
 452    if (rc == -1) {
 453        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
 454        dev->remove_watch(dev, dev->vq[index].kick_fd);
 455    } else {
 456        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
 457               kick_data, vq->handler, index);
 458        if (vq->handler) {
 459            vq->handler(dev, index);
 460        }
 461    }
 462}
 463
 464static bool
 465vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 466{
 467    vmsg->payload.u64 =
 468        1ULL << VHOST_F_LOG_ALL |
 469        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
 470
 471    if (dev->iface->get_features) {
 472        vmsg->payload.u64 |= dev->iface->get_features(dev);
 473    }
 474
 475    vmsg->size = sizeof(vmsg->payload.u64);
 476    vmsg->fd_num = 0;
 477
 478    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
 479
 480    return true;
 481}
 482
 483static void
 484vu_set_enable_all_rings(VuDev *dev, bool enabled)
 485{
 486    int i;
 487
 488    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
 489        dev->vq[i].enable = enabled;
 490    }
 491}
 492
 493static bool
 494vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 495{
 496    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
 497
 498    dev->features = vmsg->payload.u64;
 499
  500    if (!vu_has_feature(dev, VHOST_USER_F_PROTOCOL_FEATURES)) {
 501        vu_set_enable_all_rings(dev, true);
 502    }
 503
 504    if (dev->iface->set_features) {
 505        dev->iface->set_features(dev, dev->features);
 506    }
 507
 508    return false;
 509}
 510
 511static bool
 512vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
 513{
 514    return false;
 515}
 516
 517static void
 518vu_close_log(VuDev *dev)
 519{
 520    if (dev->log_table) {
 521        if (munmap(dev->log_table, dev->log_size) != 0) {
 522            perror("close log munmap() error");
 523        }
 524
 525        dev->log_table = NULL;
 526    }
 527    if (dev->log_call_fd != -1) {
 528        close(dev->log_call_fd);
 529        dev->log_call_fd = -1;
 530    }
 531}
 532
 533static bool
 534vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
 535{
 536    vu_set_enable_all_rings(dev, false);
 537
 538    return false;
 539}
 540
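     /*
      * SET_MEM_TABLE handling for postcopy: map each region PROT_NONE,
      * reply to QEMU with the mapped addresses, wait for its ack, then
      * register every region with userfaultfd and make it read/write.
      */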
 541static bool
 542vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 543{
 544    int i;
 545    VhostUserMemory *memory = &vmsg->payload.memory;
 546    dev->nregions = memory->nregions;
 547
 548    DPRINT("Nregions: %d\n", memory->nregions);
 549    for (i = 0; i < dev->nregions; i++) {
 550        void *mmap_addr;
 551        VhostUserMemoryRegion *msg_region = &memory->regions[i];
 552        VuDevRegion *dev_region = &dev->regions[i];
 553
 554        DPRINT("Region %d\n", i);
 555        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
 556               msg_region->guest_phys_addr);
 557        DPRINT("    memory_size:     0x%016"PRIx64"\n",
 558               msg_region->memory_size);
 559        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
 560               msg_region->userspace_addr);
 561        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
 562               msg_region->mmap_offset);
 563
 564        dev_region->gpa = msg_region->guest_phys_addr;
 565        dev_region->size = msg_region->memory_size;
 566        dev_region->qva = msg_region->userspace_addr;
 567        dev_region->mmap_offset = msg_region->mmap_offset;
 568
  569        /* We don't use the offset argument of mmap() since the
  570         * mapped address has to be page aligned, and we use huge
  571         * pages.
  572         * In postcopy we map with PROT_NONE here to catch anyone
  573         * accessing the region before we userfault it.
 574         */
 575        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
 576                         PROT_NONE, MAP_SHARED,
 577                         vmsg->fds[i], 0);
 578
 579        if (mmap_addr == MAP_FAILED) {
 580            vu_panic(dev, "region mmap error: %s", strerror(errno));
 581        } else {
 582            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
 583            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
 584                   dev_region->mmap_addr);
 585        }
 586
 587        /* Return the address to QEMU so that it can translate the ufd
 588         * fault addresses back.
 589         */
 590        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
 591                                                 dev_region->mmap_offset);
 592        close(vmsg->fds[i]);
 593    }
 594
 595    /* Send the message back to qemu with the addresses filled in */
 596    vmsg->fd_num = 0;
 597    if (!vu_send_reply(dev, dev->sock, vmsg)) {
 598        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
 599        return false;
 600    }
 601
 602    /* Wait for QEMU to confirm that it's registered the handler for the
 603     * faults.
 604     */
 605    if (!vu_message_read(dev, dev->sock, vmsg) ||
 606        vmsg->size != sizeof(vmsg->payload.u64) ||
 607        vmsg->payload.u64 != 0) {
 608        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
 609        return false;
 610    }
 611
 612    /* OK, now we can go and register the memory and generate faults */
 613    for (i = 0; i < dev->nregions; i++) {
 614        VuDevRegion *dev_region = &dev->regions[i];
 615        int ret;
 616#ifdef UFFDIO_REGISTER
 617        /* We should already have an open ufd. Mark each memory
 618         * range as ufd.
 619         * Discard any mapping we have here; note I can't use MADV_REMOVE
 620         * or fallocate to make the hole since I don't want to lose
 621         * data that's already arrived in the shared process.
  622         * TODO: How to handle hugepages?
 623         */
 624        ret = madvise((void *)dev_region->mmap_addr,
 625                      dev_region->size + dev_region->mmap_offset,
 626                      MADV_DONTNEED);
 627        if (ret) {
 628            fprintf(stderr,
 629                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
 630                    __func__, i, strerror(errno));
 631        }
  632        /* Turn off transparent hugepages so we don't lose wakeups
  633         * in neighbouring pages.
  634         * TODO: Turn this back on later.
 635         */
 636        ret = madvise((void *)dev_region->mmap_addr,
 637                      dev_region->size + dev_region->mmap_offset,
 638                      MADV_NOHUGEPAGE);
 639        if (ret) {
 640            /* Note: This can happen legally on kernels that are configured
 641             * without madvise'able hugepages
 642             */
 643            fprintf(stderr,
 644                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
 645                    __func__, i, strerror(errno));
 646        }
 647        struct uffdio_register reg_struct;
 648        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
 649        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
 650        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
 651
 652        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
 653            vu_panic(dev, "%s: Failed to userfault region %d "
 654                          "@%p + size:%zx offset: %zx: (ufd=%d)%s\n",
 655                     __func__, i,
 656                     dev_region->mmap_addr,
 657                     dev_region->size, dev_region->mmap_offset,
 658                     dev->postcopy_ufd, strerror(errno));
 659            return false;
 660        }
 661        if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
 662            vu_panic(dev, "%s Region (%d) doesn't support COPY",
 663                     __func__, i);
 664            return false;
 665        }
 666        DPRINT("%s: region %d: Registered userfault for %llx + %llx\n",
 667                __func__, i, reg_struct.range.start, reg_struct.range.len);
 668        /* Now it's registered we can let the client at it */
 669        if (mprotect((void *)dev_region->mmap_addr,
 670                     dev_region->size + dev_region->mmap_offset,
 671                     PROT_READ | PROT_WRITE)) {
 672            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
 673                     i, strerror(errno));
 674            return false;
 675        }
 676        /* TODO: Stash 'zero' support flags somewhere */
 677#endif
 678    }
 679
 680    return false;
 681}
 682
 683static bool
 684vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
 685{
 686    int i;
 687    VhostUserMemory *memory = &vmsg->payload.memory;
 688
 689    for (i = 0; i < dev->nregions; i++) {
 690        VuDevRegion *r = &dev->regions[i];
 691        void *m = (void *) (uintptr_t) r->mmap_addr;
 692
 693        if (m) {
 694            munmap(m, r->size + r->mmap_offset);
 695        }
 696    }
 697    dev->nregions = memory->nregions;
 698
 699    if (dev->postcopy_listening) {
 700        return vu_set_mem_table_exec_postcopy(dev, vmsg);
 701    }
 702
 703    DPRINT("Nregions: %d\n", memory->nregions);
 704    for (i = 0; i < dev->nregions; i++) {
 705        void *mmap_addr;
 706        VhostUserMemoryRegion *msg_region = &memory->regions[i];
 707        VuDevRegion *dev_region = &dev->regions[i];
 708
 709        DPRINT("Region %d\n", i);
 710        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
 711               msg_region->guest_phys_addr);
 712        DPRINT("    memory_size:     0x%016"PRIx64"\n",
 713               msg_region->memory_size);
 714        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
 715               msg_region->userspace_addr);
 716        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
 717               msg_region->mmap_offset);
 718
 719        dev_region->gpa = msg_region->guest_phys_addr;
 720        dev_region->size = msg_region->memory_size;
 721        dev_region->qva = msg_region->userspace_addr;
 722        dev_region->mmap_offset = msg_region->mmap_offset;
 723
  724        /* We don't use the offset argument of mmap() since the
 725         * mapped address has to be page aligned, and we use huge
 726         * pages.  */
 727        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
 728                         PROT_READ | PROT_WRITE, MAP_SHARED,
 729                         vmsg->fds[i], 0);
 730
 731        if (mmap_addr == MAP_FAILED) {
 732            vu_panic(dev, "region mmap error: %s", strerror(errno));
 733        } else {
 734            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
 735            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
 736                   dev_region->mmap_addr);
 737        }
 738
 739        close(vmsg->fds[i]);
 740    }
 741
 742    return false;
 743}
 744
 745static bool
 746vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
 747{
 748    int fd;
 749    uint64_t log_mmap_size, log_mmap_offset;
 750    void *rc;
 751
 752    if (vmsg->fd_num != 1 ||
 753        vmsg->size != sizeof(vmsg->payload.log)) {
 754        vu_panic(dev, "Invalid log_base message");
 755        return true;
 756    }
 757
 758    fd = vmsg->fds[0];
 759    log_mmap_offset = vmsg->payload.log.mmap_offset;
 760    log_mmap_size = vmsg->payload.log.mmap_size;
 761    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
 762    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);
 763
 764    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
 765              log_mmap_offset);
 766    close(fd);
 767    if (rc == MAP_FAILED) {
 768        perror("log mmap error");
 769    }
 770
 771    if (dev->log_table) {
 772        munmap(dev->log_table, dev->log_size);
 773    }
 774    dev->log_table = rc;
 775    dev->log_size = log_mmap_size;
 776
 777    vmsg->size = sizeof(vmsg->payload.u64);
 778    vmsg->fd_num = 0;
 779
 780    return true;
 781}
 782
 783static bool
 784vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
 785{
 786    if (vmsg->fd_num != 1) {
 787        vu_panic(dev, "Invalid log_fd message");
 788        return false;
 789    }
 790
 791    if (dev->log_call_fd != -1) {
 792        close(dev->log_call_fd);
 793    }
 794    dev->log_call_fd = vmsg->fds[0];
 795    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);
 796
 797    return false;
 798}
 799
 800static bool
 801vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
 802{
 803    unsigned int index = vmsg->payload.state.index;
 804    unsigned int num = vmsg->payload.state.num;
 805
 806    DPRINT("State.index: %d\n", index);
 807    DPRINT("State.num:   %d\n", num);
 808    dev->vq[index].vring.num = num;
 809
 810    return false;
 811}
 812
 813static bool
 814vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
 815{
 816    struct vhost_vring_addr *vra = &vmsg->payload.addr;
 817    unsigned int index = vra->index;
 818    VuVirtq *vq = &dev->vq[index];
 819
 820    DPRINT("vhost_vring_addr:\n");
 821    DPRINT("    index:  %d\n", vra->index);
 822    DPRINT("    flags:  %d\n", vra->flags);
 823    DPRINT("    desc_user_addr:   0x%016" PRIx64 "\n", vra->desc_user_addr);
 824    DPRINT("    used_user_addr:   0x%016" PRIx64 "\n", vra->used_user_addr);
 825    DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", vra->avail_user_addr);
 826    DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", vra->log_guest_addr);
 827
 828    vq->vring.flags = vra->flags;
 829    vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
 830    vq->vring.used = qva_to_va(dev, vra->used_user_addr);
 831    vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
 832    vq->vring.log_guest_addr = vra->log_guest_addr;
 833
 834    DPRINT("Setting virtq addresses:\n");
 835    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
 836    DPRINT("    vring_used  at %p\n", vq->vring.used);
 837    DPRINT("    vring_avail at %p\n", vq->vring.avail);
 838
 839    if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
 840        vu_panic(dev, "Invalid vring_addr message");
 841        return false;
 842    }
 843
 844    vq->used_idx = vq->vring.used->idx;
 845
 846    if (vq->last_avail_idx != vq->used_idx) {
 847        bool resume = dev->iface->queue_is_processed_in_order &&
 848            dev->iface->queue_is_processed_in_order(dev, index);
 849
 850        DPRINT("Last avail index != used index: %u != %u%s\n",
 851               vq->last_avail_idx, vq->used_idx,
 852               resume ? ", resuming" : "");
 853
 854        if (resume) {
 855            vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
 856        }
 857    }
 858
 859    return false;
 860}
 861
 862static bool
 863vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
 864{
 865    unsigned int index = vmsg->payload.state.index;
 866    unsigned int num = vmsg->payload.state.num;
 867
 868    DPRINT("State.index: %d\n", index);
 869    DPRINT("State.num:   %d\n", num);
 870    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;
 871
 872    return false;
 873}
 874
 875static bool
 876vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
 877{
 878    unsigned int index = vmsg->payload.state.index;
 879
 880    DPRINT("State.index: %d\n", index);
 881    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
 882    vmsg->size = sizeof(vmsg->payload.state);
 883
 884    dev->vq[index].started = false;
 885    if (dev->iface->queue_set_started) {
 886        dev->iface->queue_set_started(dev, index, false);
 887    }
 888
 889    if (dev->vq[index].call_fd != -1) {
 890        close(dev->vq[index].call_fd);
 891        dev->vq[index].call_fd = -1;
 892    }
 893    if (dev->vq[index].kick_fd != -1) {
 894        dev->remove_watch(dev, dev->vq[index].kick_fd);
 895        close(dev->vq[index].kick_fd);
 896        dev->vq[index].kick_fd = -1;
 897    }
 898
 899    return true;
 900}
 901
 902static bool
 903vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
 904{
 905    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
 906
 907    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
 908        vmsg_close_fds(vmsg);
 909        vu_panic(dev, "Invalid queue index: %u", index);
 910        return false;
 911    }
 912
 913    if (vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK ||
 914        vmsg->fd_num != 1) {
 915        vmsg_close_fds(vmsg);
 916        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
 917        return false;
 918    }
 919
 920    return true;
 921}
 922
 923static int
 924inflight_desc_compare(const void *a, const void *b)
 925{
 926    VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
 927                        *desc1 = (VuVirtqInflightDesc *)b;
 928
 929    if (desc1->counter > desc0->counter &&
 930        (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
 931        return 1;
 932    }
 933
 934    return -1;
 935}
 936
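     /*
      * After (re)connecting, scan the inflight buffer: count descriptors
      * that are still in flight, collect them into resubmit_list sorted by
      * their submission counter, and kick the queue so processing resumes.
      */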
 937static int
 938vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
 939{
 940    int i = 0;
 941
 942    if (!has_feature(dev->protocol_features,
 943        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
 944        return 0;
 945    }
 946
 947    if (unlikely(!vq->inflight)) {
 948        return -1;
 949    }
 950
 951    if (unlikely(!vq->inflight->version)) {
 952        /* initialize the buffer */
 953        vq->inflight->version = INFLIGHT_VERSION;
 954        return 0;
 955    }
 956
 957    vq->used_idx = vq->vring.used->idx;
 958    vq->resubmit_num = 0;
 959    vq->resubmit_list = NULL;
 960    vq->counter = 0;
 961
 962    if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
 963        vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;
 964
 965        barrier();
 966
 967        vq->inflight->used_idx = vq->used_idx;
 968    }
 969
 970    for (i = 0; i < vq->inflight->desc_num; i++) {
 971        if (vq->inflight->desc[i].inflight == 1) {
 972            vq->inuse++;
 973        }
 974    }
 975
 976    vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;
 977
 978    if (vq->inuse) {
 979        vq->resubmit_list = malloc(sizeof(VuVirtqInflightDesc) * vq->inuse);
 980        if (!vq->resubmit_list) {
 981            return -1;
 982        }
 983
 984        for (i = 0; i < vq->inflight->desc_num; i++) {
 985            if (vq->inflight->desc[i].inflight) {
 986                vq->resubmit_list[vq->resubmit_num].index = i;
 987                vq->resubmit_list[vq->resubmit_num].counter =
 988                                        vq->inflight->desc[i].counter;
 989                vq->resubmit_num++;
 990            }
 991        }
 992
 993        if (vq->resubmit_num > 1) {
 994            qsort(vq->resubmit_list, vq->resubmit_num,
 995                  sizeof(VuVirtqInflightDesc), inflight_desc_compare);
 996        }
 997        vq->counter = vq->resubmit_list[0].counter + 1;
 998    }
 999
1000    /* in case of I/O hang after reconnecting */
1001    if (eventfd_write(vq->kick_fd, 1)) {
1002        return -1;
1003    }
1004
1005    return 0;
1006}
1007
1008static bool
1009vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
1010{
1011    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1012
1013    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1014
1015    if (!vu_check_queue_msg_file(dev, vmsg)) {
1016        return false;
1017    }
1018
1019    if (dev->vq[index].kick_fd != -1) {
1020        dev->remove_watch(dev, dev->vq[index].kick_fd);
1021        close(dev->vq[index].kick_fd);
1022        dev->vq[index].kick_fd = -1;
1023    }
1024
1025    dev->vq[index].kick_fd = vmsg->fds[0];
1026    DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);
1027
1028    dev->vq[index].started = true;
1029    if (dev->iface->queue_set_started) {
1030        dev->iface->queue_set_started(dev, index, true);
1031    }
1032
1033    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
1034        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
1035                       vu_kick_cb, (void *)(long)index);
1036
1037        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
1038               dev->vq[index].kick_fd, index);
1039    }
1040
1041    if (vu_check_queue_inflights(dev, &dev->vq[index])) {
1042        vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
1043    }
1044
1045    return false;
1046}
1047
1048void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
1049                          vu_queue_handler_cb handler)
1050{
1051    int qidx = vq - dev->vq;
1052
1053    vq->handler = handler;
1054    if (vq->kick_fd >= 0) {
1055        if (handler) {
1056            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
1057                           vu_kick_cb, (void *)(long)qidx);
1058        } else {
1059            dev->remove_watch(dev, vq->kick_fd);
1060        }
1061    }
1062}
1063
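     /*
      * Ask the master, over the slave channel, to mmap a host notifier area
      * for this queue (VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG).  Requires
      * the VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD protocol feature.
      */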
1064bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
1065                                int size, int offset)
1066{
1067    int qidx = vq - dev->vq;
1068    int fd_num = 0;
1069    VhostUserMsg vmsg = {
1070        .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
1071        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1072        .size = sizeof(vmsg.payload.area),
1073        .payload.area = {
1074            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
1075            .size = size,
1076            .offset = offset,
1077        },
1078    };
1079
1080    if (fd == -1) {
1081        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
1082    } else {
1083        vmsg.fds[fd_num++] = fd;
1084    }
1085
1086    vmsg.fd_num = fd_num;
1087
 1088    if (!has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
1089        return false;
1090    }
1091
1092    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
1093        return false;
1094    }
1095
1096    return vu_process_message_reply(dev, &vmsg);
1097}
1098
1099static bool
1100vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
1101{
1102    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1103
1104    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1105
1106    if (!vu_check_queue_msg_file(dev, vmsg)) {
1107        return false;
1108    }
1109
1110    if (dev->vq[index].call_fd != -1) {
1111        close(dev->vq[index].call_fd);
1112        dev->vq[index].call_fd = -1;
1113    }
1114
1115    dev->vq[index].call_fd = vmsg->fds[0];
1116
1117    /* in case of I/O hang after reconnecting */
1118    if (eventfd_write(vmsg->fds[0], 1)) {
1119        return -1;
1120    }
1121
1122    DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);
1123
1124    return false;
1125}
1126
1127static bool
1128vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
1129{
1130    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1131
1132    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1133
1134    if (!vu_check_queue_msg_file(dev, vmsg)) {
1135        return false;
1136    }
1137
1138    if (dev->vq[index].err_fd != -1) {
1139        close(dev->vq[index].err_fd);
1140        dev->vq[index].err_fd = -1;
1141    }
1142
1143    dev->vq[index].err_fd = vmsg->fds[0];
1144
1145    return false;
1146}
1147
1148static bool
1149vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1150{
1151    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
1152                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
1153                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
1154                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD;
1155
1156    if (have_userfault()) {
1157        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
1158    }
1159
1160    if (dev->iface->get_protocol_features) {
1161        features |= dev->iface->get_protocol_features(dev);
1162    }
1163
1164    vmsg->payload.u64 = features;
1165    vmsg->size = sizeof(vmsg->payload.u64);
1166    vmsg->fd_num = 0;
1167
1168    return true;
1169}
1170
1171static bool
1172vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1173{
1174    uint64_t features = vmsg->payload.u64;
1175
1176    DPRINT("u64: 0x%016"PRIx64"\n", features);
1177
1178    dev->protocol_features = vmsg->payload.u64;
1179
1180    if (dev->iface->set_protocol_features) {
1181        dev->iface->set_protocol_features(dev, features);
1182    }
1183
1184    return false;
1185}
1186
1187static bool
1188vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
1189{
1190    DPRINT("Function %s() not implemented yet.\n", __func__);
1191    return false;
1192}
1193
1194static bool
1195vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
1196{
1197    unsigned int index = vmsg->payload.state.index;
1198    unsigned int enable = vmsg->payload.state.num;
1199
1200    DPRINT("State.index: %d\n", index);
1201    DPRINT("State.enable:   %d\n", enable);
1202
1203    if (index >= VHOST_MAX_NR_VIRTQUEUE) {
1204        vu_panic(dev, "Invalid vring_enable index: %u", index);
1205        return false;
1206    }
1207
1208    dev->vq[index].enable = enable;
1209    return false;
1210}
1211
1212static bool
1213vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
1214{
1215    if (vmsg->fd_num != 1) {
1216        vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
1217        return false;
1218    }
1219
1220    if (dev->slave_fd != -1) {
1221        close(dev->slave_fd);
1222    }
1223    dev->slave_fd = vmsg->fds[0];
1224    DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);
1225
1226    return false;
1227}
1228
1229static bool
1230vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
1231{
1232    int ret = -1;
1233
1234    if (dev->iface->get_config) {
1235        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
1236                                     vmsg->payload.config.size);
1237    }
1238
1239    if (ret) {
1240        /* resize to zero to indicate an error to master */
1241        vmsg->size = 0;
1242    }
1243
1244    return true;
1245}
1246
1247static bool
1248vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
1249{
1250    int ret = -1;
1251
1252    if (dev->iface->set_config) {
1253        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
1254                                     vmsg->payload.config.offset,
1255                                     vmsg->payload.config.size,
1256                                     vmsg->payload.config.flags);
1257        if (ret) {
1258            vu_panic(dev, "Set virtio configuration space failed");
1259        }
1260    }
1261
1262    return false;
1263}
1264
1265static bool
1266vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
1267{
1268    dev->postcopy_ufd = -1;
1269#ifdef UFFDIO_API
1270    struct uffdio_api api_struct;
1271
1272    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
1273    vmsg->size = 0;
1274#endif
1275
1276    if (dev->postcopy_ufd == -1) {
1277        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
1278        goto out;
1279    }
1280
1281#ifdef UFFDIO_API
1282    api_struct.api = UFFD_API;
1283    api_struct.features = 0;
1284    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
1285        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
1286        close(dev->postcopy_ufd);
1287        dev->postcopy_ufd = -1;
1288        goto out;
1289    }
1290    /* TODO: Stash feature flags somewhere */
1291#endif
1292
1293out:
1294    /* Return a ufd to the QEMU */
1295    vmsg->fd_num = 1;
1296    vmsg->fds[0] = dev->postcopy_ufd;
1297    return true; /* = send a reply */
1298}
1299
1300static bool
1301vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
1302{
1303    vmsg->payload.u64 = -1;
1304    vmsg->size = sizeof(vmsg->payload.u64);
1305
1306    if (dev->nregions) {
1307        vu_panic(dev, "Regions already registered at postcopy-listen");
1308        return true;
1309    }
1310    dev->postcopy_listening = true;
1311
1312    vmsg->flags = VHOST_USER_VERSION |  VHOST_USER_REPLY_MASK;
1313    vmsg->payload.u64 = 0; /* Success */
1314    return true;
1315}
1316
1317static bool
1318vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
1319{
1320    DPRINT("%s: Entry\n", __func__);
1321    dev->postcopy_listening = false;
1322    if (dev->postcopy_ufd > 0) {
1323        close(dev->postcopy_ufd);
1324        dev->postcopy_ufd = -1;
1325        DPRINT("%s: Done close\n", __func__);
1326    }
1327
1328    vmsg->fd_num = 0;
1329    vmsg->payload.u64 = 0;
1330    vmsg->size = sizeof(vmsg->payload.u64);
1331    vmsg->flags = VHOST_USER_VERSION |  VHOST_USER_REPLY_MASK;
1332    DPRINT("%s: exit\n", __func__);
1333    return true;
1334}
1335
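     /* Bytes one queue needs in the inflight buffer, cache-line aligned. */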
1336static inline uint64_t
1337vu_inflight_queue_size(uint16_t queue_size)
1338{
1339    return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
1340           sizeof(uint16_t), INFLIGHT_ALIGNMENT);
1341}
1342
1343static bool
1344vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1345{
1346    int fd;
1347    void *addr;
1348    uint64_t mmap_size;
1349    uint16_t num_queues, queue_size;
1350
1351    if (vmsg->size != sizeof(vmsg->payload.inflight)) {
1352        vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
1353        vmsg->payload.inflight.mmap_size = 0;
1354        return true;
1355    }
1356
1357    num_queues = vmsg->payload.inflight.num_queues;
1358    queue_size = vmsg->payload.inflight.queue_size;
1359
1360    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
1361    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);
1362
1363    mmap_size = vu_inflight_queue_size(queue_size) * num_queues;
1364
1365    addr = qemu_memfd_alloc("vhost-inflight", mmap_size,
1366                            F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1367                            &fd, NULL);
1368
1369    if (!addr) {
1370        vu_panic(dev, "Failed to alloc vhost inflight area");
1371        vmsg->payload.inflight.mmap_size = 0;
1372        return true;
1373    }
1374
1375    memset(addr, 0, mmap_size);
1376
1377    dev->inflight_info.addr = addr;
1378    dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
1379    dev->inflight_info.fd = vmsg->fds[0] = fd;
1380    vmsg->fd_num = 1;
1381    vmsg->payload.inflight.mmap_offset = 0;
1382
1383    DPRINT("send inflight mmap_size: %"PRId64"\n",
1384           vmsg->payload.inflight.mmap_size);
1385    DPRINT("send inflight mmap offset: %"PRId64"\n",
1386           vmsg->payload.inflight.mmap_offset);
1387
1388    return true;
1389}
1390
1391static bool
1392vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1393{
1394    int fd, i;
1395    uint64_t mmap_size, mmap_offset;
1396    uint16_t num_queues, queue_size;
1397    void *rc;
1398
1399    if (vmsg->fd_num != 1 ||
1400        vmsg->size != sizeof(vmsg->payload.inflight)) {
1401        vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
1402                 vmsg->size, vmsg->fd_num);
1403        return false;
1404    }
1405
1406    fd = vmsg->fds[0];
1407    mmap_size = vmsg->payload.inflight.mmap_size;
1408    mmap_offset = vmsg->payload.inflight.mmap_offset;
1409    num_queues = vmsg->payload.inflight.num_queues;
1410    queue_size = vmsg->payload.inflight.queue_size;
1411
1412    DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
1413    DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
1414    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
1415    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);
1416
1417    rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1418              fd, mmap_offset);
1419
1420    if (rc == MAP_FAILED) {
1421        vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
1422        return false;
1423    }
1424
1425    if (dev->inflight_info.fd) {
1426        close(dev->inflight_info.fd);
1427    }
1428
1429    if (dev->inflight_info.addr) {
1430        munmap(dev->inflight_info.addr, dev->inflight_info.size);
1431    }
1432
1433    dev->inflight_info.fd = fd;
1434    dev->inflight_info.addr = rc;
1435    dev->inflight_info.size = mmap_size;
1436
1437    for (i = 0; i < num_queues; i++) {
1438        dev->vq[i].inflight = (VuVirtqInflight *)rc;
1439        dev->vq[i].inflight->desc_num = queue_size;
1440        rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
1441    }
1442
1443    return false;
1444}
1445
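     /*
      * Dispatch a single message to its handler.  The return value tells
      * vu_dispatch() whether a reply has to be sent back to the master.
      */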
1446static bool
1447vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
1448{
1449    int do_reply = 0;
1450
1451    /* Print out generic part of the request. */
1452    DPRINT("================ Vhost user message ================\n");
1453    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
1454           vmsg->request);
1455    DPRINT("Flags:   0x%x\n", vmsg->flags);
1456    DPRINT("Size:    %d\n", vmsg->size);
1457
1458    if (vmsg->fd_num) {
1459        int i;
1460        DPRINT("Fds:");
1461        for (i = 0; i < vmsg->fd_num; i++) {
1462            DPRINT(" %d", vmsg->fds[i]);
1463        }
1464        DPRINT("\n");
1465    }
1466
1467    if (dev->iface->process_msg &&
1468        dev->iface->process_msg(dev, vmsg, &do_reply)) {
1469        return do_reply;
1470    }
1471
1472    switch (vmsg->request) {
1473    case VHOST_USER_GET_FEATURES:
1474        return vu_get_features_exec(dev, vmsg);
1475    case VHOST_USER_SET_FEATURES:
1476        return vu_set_features_exec(dev, vmsg);
1477    case VHOST_USER_GET_PROTOCOL_FEATURES:
1478        return vu_get_protocol_features_exec(dev, vmsg);
1479    case VHOST_USER_SET_PROTOCOL_FEATURES:
1480        return vu_set_protocol_features_exec(dev, vmsg);
1481    case VHOST_USER_SET_OWNER:
1482        return vu_set_owner_exec(dev, vmsg);
1483    case VHOST_USER_RESET_OWNER:
1484        return vu_reset_device_exec(dev, vmsg);
1485    case VHOST_USER_SET_MEM_TABLE:
1486        return vu_set_mem_table_exec(dev, vmsg);
1487    case VHOST_USER_SET_LOG_BASE:
1488        return vu_set_log_base_exec(dev, vmsg);
1489    case VHOST_USER_SET_LOG_FD:
1490        return vu_set_log_fd_exec(dev, vmsg);
1491    case VHOST_USER_SET_VRING_NUM:
1492        return vu_set_vring_num_exec(dev, vmsg);
1493    case VHOST_USER_SET_VRING_ADDR:
1494        return vu_set_vring_addr_exec(dev, vmsg);
1495    case VHOST_USER_SET_VRING_BASE:
1496        return vu_set_vring_base_exec(dev, vmsg);
1497    case VHOST_USER_GET_VRING_BASE:
1498        return vu_get_vring_base_exec(dev, vmsg);
1499    case VHOST_USER_SET_VRING_KICK:
1500        return vu_set_vring_kick_exec(dev, vmsg);
1501    case VHOST_USER_SET_VRING_CALL:
1502        return vu_set_vring_call_exec(dev, vmsg);
1503    case VHOST_USER_SET_VRING_ERR:
1504        return vu_set_vring_err_exec(dev, vmsg);
1505    case VHOST_USER_GET_QUEUE_NUM:
1506        return vu_get_queue_num_exec(dev, vmsg);
1507    case VHOST_USER_SET_VRING_ENABLE:
1508        return vu_set_vring_enable_exec(dev, vmsg);
1509    case VHOST_USER_SET_SLAVE_REQ_FD:
1510        return vu_set_slave_req_fd(dev, vmsg);
1511    case VHOST_USER_GET_CONFIG:
1512        return vu_get_config(dev, vmsg);
1513    case VHOST_USER_SET_CONFIG:
1514        return vu_set_config(dev, vmsg);
1515    case VHOST_USER_NONE:
1516        /* if you need processing before exit, override iface->process_msg */
1517        exit(0);
1518    case VHOST_USER_POSTCOPY_ADVISE:
1519        return vu_set_postcopy_advise(dev, vmsg);
1520    case VHOST_USER_POSTCOPY_LISTEN:
1521        return vu_set_postcopy_listen(dev, vmsg);
1522    case VHOST_USER_POSTCOPY_END:
1523        return vu_set_postcopy_end(dev, vmsg);
1524    case VHOST_USER_GET_INFLIGHT_FD:
1525        return vu_get_inflight_fd(dev, vmsg);
1526    case VHOST_USER_SET_INFLIGHT_FD:
1527        return vu_set_inflight_fd(dev, vmsg);
1528    default:
1529        vmsg_close_fds(vmsg);
1530        vu_panic(dev, "Unhandled request: %d", vmsg->request);
1531    }
1532
1533    return false;
1534}
1535
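     /*
      * Read and process one message from the vhost-user socket, sending a
      * reply when the handler requests one.  Returns false on failure.
      */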
1536bool
1537vu_dispatch(VuDev *dev)
1538{
1539    VhostUserMsg vmsg = { 0, };
1540    int reply_requested;
1541    bool success = false;
1542
1543    if (!vu_message_read(dev, dev->sock, &vmsg)) {
1544        goto end;
1545    }
1546
1547    reply_requested = vu_process_message(dev, &vmsg);
1548    if (!reply_requested) {
1549        success = true;
1550        goto end;
1551    }
1552
1553    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
1554        goto end;
1555    }
1556
1557    success = true;
1558
1559end:
1560    free(vmsg.data);
1561    return success;
1562}
1563
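     /*
      * Release everything owned by the device: unmap the memory regions and
      * the inflight buffer, and close the per-queue, log, slave and socket
      * file descriptors.
      */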
1564void
1565vu_deinit(VuDev *dev)
1566{
1567    int i;
1568
1569    for (i = 0; i < dev->nregions; i++) {
1570        VuDevRegion *r = &dev->regions[i];
1571        void *m = (void *) (uintptr_t) r->mmap_addr;
1572        if (m != MAP_FAILED) {
1573            munmap(m, r->size + r->mmap_offset);
1574        }
1575    }
1576    dev->nregions = 0;
1577
1578    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
1579        VuVirtq *vq = &dev->vq[i];
1580
1581        if (vq->call_fd != -1) {
1582            close(vq->call_fd);
1583            vq->call_fd = -1;
1584        }
1585
1586        if (vq->kick_fd != -1) {
1587            close(vq->kick_fd);
1588            vq->kick_fd = -1;
1589        }
1590
1591        if (vq->err_fd != -1) {
1592            close(vq->err_fd);
1593            vq->err_fd = -1;
1594        }
1595
1596        if (vq->resubmit_list) {
1597            free(vq->resubmit_list);
1598            vq->resubmit_list = NULL;
1599        }
1600
1601        vq->inflight = NULL;
1602    }
1603
1604    if (dev->inflight_info.addr) {
1605        munmap(dev->inflight_info.addr, dev->inflight_info.size);
1606        dev->inflight_info.addr = NULL;
1607    }
1608
1609    if (dev->inflight_info.fd > 0) {
1610        close(dev->inflight_info.fd);
1611        dev->inflight_info.fd = -1;
1612    }
1613
1614    vu_close_log(dev);
1615    if (dev->slave_fd != -1) {
1616        close(dev->slave_fd);
1617        dev->slave_fd = -1;
1618    }
1619
1620    if (dev->sock != -1) {
1621        close(dev->sock);
1622    }
1623}
1624
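     /*
      * Initialise a VuDev for the given connected socket and application
      * callbacks; all queues start with their fds invalid and notifications
      * enabled.
      */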
1625void
1626vu_init(VuDev *dev,
1627        int socket,
1628        vu_panic_cb panic,
1629        vu_set_watch_cb set_watch,
1630        vu_remove_watch_cb remove_watch,
1631        const VuDevIface *iface)
1632{
1633    int i;
1634
1635    assert(socket >= 0);
1636    assert(set_watch);
1637    assert(remove_watch);
1638    assert(iface);
1639    assert(panic);
1640
1641    memset(dev, 0, sizeof(*dev));
1642
1643    dev->sock = socket;
1644    dev->panic = panic;
1645    dev->set_watch = set_watch;
1646    dev->remove_watch = remove_watch;
1647    dev->iface = iface;
1648    dev->log_call_fd = -1;
1649    dev->slave_fd = -1;
1650    for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
1651        dev->vq[i] = (VuVirtq) {
1652            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
1653            .notification = true,
1654        };
1655    }
1656}
1657
1658VuVirtq *
1659vu_get_queue(VuDev *dev, int qidx)
1660{
1661    assert(qidx < VHOST_MAX_NR_VIRTQUEUE);
1662    return &dev->vq[qidx];
1663}
1664
1665bool
1666vu_queue_enabled(VuDev *dev, VuVirtq *vq)
1667{
1668    return vq->enable;
1669}
1670
1671bool
1672vu_queue_started(const VuDev *dev, const VuVirtq *vq)
1673{
1674    return vq->started;
1675}
1676
1677static inline uint16_t
1678vring_avail_flags(VuVirtq *vq)
1679{
1680    return vq->vring.avail->flags;
1681}
1682
1683static inline uint16_t
1684vring_avail_idx(VuVirtq *vq)
1685{
1686    vq->shadow_avail_idx = vq->vring.avail->idx;
1687
1688    return vq->shadow_avail_idx;
1689}
1690
1691static inline uint16_t
1692vring_avail_ring(VuVirtq *vq, int i)
1693{
1694    return vq->vring.avail->ring[i];
1695}
1696
1697static inline uint16_t
1698vring_get_used_event(VuVirtq *vq)
1699{
1700    return vring_avail_ring(vq, vq->vring.num);
1701}
1702
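     /* Number of entries the guest has made available since idx. */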
1703static int
1704virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
1705{
1706    uint16_t num_heads = vring_avail_idx(vq) - idx;
1707
1708    /* Check it isn't doing very strange things with descriptor numbers. */
1709    if (num_heads > vq->vring.num) {
1710        vu_panic(dev, "Guest moved used index from %u to %u",
1711                 idx, vq->shadow_avail_idx);
1712        return -1;
1713    }
1714    if (num_heads) {
1715        /* On success, callers read a descriptor at vq->last_avail_idx.
1716         * Make sure descriptor read does not bypass avail index read. */
1717        smp_rmb();
1718    }
1719
1720    return num_heads;
1721}
1722
1723static bool
1724virtqueue_get_head(VuDev *dev, VuVirtq *vq,
1725                   unsigned int idx, unsigned int *head)
1726{
1727    /* Grab the next descriptor number they're advertising, and increment
1728     * the index we've seen. */
1729    *head = vring_avail_ring(vq, idx % vq->vring.num);
1730
1731    /* If their number is silly, that's a fatal mistake. */
1732    if (*head >= vq->vring.num) {
1733        vu_panic(dev, "Guest says index %u is available", head);
1734        return false;
1735    }
1736
1737    return true;
1738}
1739
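     /*
      * Copy an indirect descriptor table of len bytes at guest address addr
      * into desc, translating it region by region.
      */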
1740static int
1741virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
1742                             uint64_t addr, size_t len)
1743{
1744    struct vring_desc *ori_desc;
1745    uint64_t read_len;
1746
1747    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
1748        return -1;
1749    }
1750
1751    if (len == 0) {
1752        return -1;
1753    }
1754
1755    while (len) {
1756        read_len = len;
1757        ori_desc = vu_gpa_to_va(dev, &read_len, addr);
1758        if (!ori_desc) {
1759            return -1;
1760        }
1761
1762        memcpy(desc, ori_desc, read_len);
1763        len -= read_len;
1764        addr += read_len;
 1765        desc = (struct vring_desc *)((char *)desc + read_len); /* read_len is in bytes */
1766    }
1767
1768    return 0;
1769}
1770
1771enum {
1772    VIRTQUEUE_READ_DESC_ERROR = -1,
1773    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
1774    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
1775};
1776
1777static int
1778virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
1779                         int i, unsigned int max, unsigned int *next)
1780{
1781    /* If this descriptor says it doesn't chain, we're done. */
1782    if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
1783        return VIRTQUEUE_READ_DESC_DONE;
1784    }
1785
1786    /* Check they're not leading us off end of descriptors. */
1787    *next = desc[i].next;
1788    /* Make sure compiler knows to grab that: we don't want it changing! */
1789    smp_wmb();
1790
1791    if (*next >= max) {
 1792        vu_panic(dev, "Desc next is %u", *next);
1793        return VIRTQUEUE_READ_DESC_ERROR;
1794    }
1795
1796    return VIRTQUEUE_READ_DESC_MORE;
1797}
1798
1799void
1800vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
1801                         unsigned int *out_bytes,
1802                         unsigned max_in_bytes, unsigned max_out_bytes)
1803{
1804    unsigned int idx;
1805    unsigned int total_bufs, in_total, out_total;
1806    int rc;
1807
1808    idx = vq->last_avail_idx;
1809
1810    total_bufs = in_total = out_total = 0;
1811    if (unlikely(dev->broken) ||
1812        unlikely(!vq->vring.avail)) {
1813        goto done;
1814    }
1815
1816    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
1817        unsigned int max, desc_len, num_bufs, indirect = 0;
1818        uint64_t desc_addr, read_len;
1819        struct vring_desc *desc;
1820        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
1821        unsigned int i;
1822
1823        max = vq->vring.num;
1824        num_bufs = total_bufs;
1825        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
1826            goto err;
1827        }
1828        desc = vq->vring.desc;
1829
1830        if (desc[i].flags & VRING_DESC_F_INDIRECT) {
1831            if (desc[i].len % sizeof(struct vring_desc)) {
1832                vu_panic(dev, "Invalid size for indirect buffer table");
1833                goto err;
1834            }
1835
1836            /* If we've got too many, that implies a descriptor loop. */
1837            if (num_bufs >= max) {
1838                vu_panic(dev, "Looped descriptor");
1839                goto err;
1840            }
1841
1842            /* loop over the indirect descriptor table */
1843            indirect = 1;
1844            desc_addr = desc[i].addr;
1845            desc_len = desc[i].len;
1846            max = desc_len / sizeof(struct vring_desc);
1847            read_len = desc_len;
1848            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
1849            if (unlikely(desc && read_len != desc_len)) {
1850                /* Failed to use zero copy */
1851                desc = NULL;
1852                if (!virtqueue_read_indirect_desc(dev, desc_buf,
1853                                                  desc_addr,
1854                                                  desc_len)) {
1855                    desc = desc_buf;
1856                }
1857            }
1858            if (!desc) {
1859                vu_panic(dev, "Invalid indirect buffer table");
1860                goto err;
1861            }
1862            num_bufs = i = 0;
1863        }
1864
1865        do {
1866            /* If we've got too many, that implies a descriptor loop. */
1867            if (++num_bufs > max) {
1868                vu_panic(dev, "Looped descriptor");
1869                goto err;
1870            }
1871
1872            if (desc[i].flags & VRING_DESC_F_WRITE) {
1873                in_total += desc[i].len;
1874            } else {
1875                out_total += desc[i].len;
1876            }
1877            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1878                goto done;
1879            }
1880            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
1881        } while (rc == VIRTQUEUE_READ_DESC_MORE);
1882
1883        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1884            goto err;
1885        }
1886
1887        if (!indirect) {
1888            total_bufs = num_bufs;
1889        } else {
1890            total_bufs++;
1891        }
1892    }
1893    if (rc < 0) {
1894        goto err;
1895    }
1896done:
1897    if (in_bytes) {
1898        *in_bytes = in_total;
1899    }
1900    if (out_bytes) {
1901        *out_bytes = out_total;
1902    }
1903    return;
1904
1905err:
1906    in_total = out_total = 0;
1907    goto done;
1908}
1909
1910bool
1911vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
1912                     unsigned int out_bytes)
1913{
1914    unsigned int in_total, out_total;
1915
1916    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
1917                             in_bytes, out_bytes);
1918
1919    return in_bytes <= in_total && out_bytes <= out_total;
1920}
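
    /*
     * Usage sketch (illustrative): a backend that needs at least, say, 512
     * bytes of device-writable buffer space before popping a request could do
     *
     *     if (!vu_queue_avail_bytes(dev, vq, 512, 0)) {
     *         return;     (not enough guest buffer space queued yet)
     *     }
     *
     * in_bytes is the required device-writable (VRING_DESC_F_WRITE) space,
     * out_bytes the required driver-written, device-readable space; the 512
     * above is only an example value.
     */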
1921
1922/* Fetch avail_idx from VQ memory only when we really need to know if
1923 * guest has added some buffers. */
1924bool
1925vu_queue_empty(VuDev *dev, VuVirtq *vq)
1926{
1927    if (unlikely(dev->broken) ||
1928        unlikely(!vq->vring.avail)) {
1929        return true;
1930    }
1931
1932    if (vq->shadow_avail_idx != vq->last_avail_idx) {
1933        return false;
1934    }
1935
1936    return vring_avail_idx(vq) == vq->last_avail_idx;
1937}
1938
1939static bool
1940vring_notify(VuDev *dev, VuVirtq *vq)
1941{
1942    uint16_t old, new;
1943    bool v;
1944
1945    /* We need to expose used array entries before checking used event. */
1946    smp_mb();
1947
1948    /* Always notify when the queue is empty (if the feature was acknowledged) */
1949    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1950        !vq->inuse && vu_queue_empty(dev, vq)) {
1951        return true;
1952    }
1953
1954    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1955        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
1956    }
1957
1958    v = vq->signalled_used_valid;
1959    vq->signalled_used_valid = true;
1960    old = vq->signalled_used;
1961    new = vq->signalled_used = vq->used_idx;
1962    return !v || vring_need_event(vring_get_used_event(vq), new, old);
1963}
1964
1965void
1966vu_queue_notify(VuDev *dev, VuVirtq *vq)
1967{
1968    if (unlikely(dev->broken) ||
1969        unlikely(!vq->vring.avail)) {
1970        return;
1971    }
1972
1973    if (!vring_notify(dev, vq)) {
1974        DPRINT("skipped notify...\n");
1975        return;
1976    }
1977
1978    if (eventfd_write(vq->call_fd, 1) < 0) {
1979        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
1980    }
1981}
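
    /*
     * Illustrative completion path: after adding used entries, the backend
     * signals the guest once; vring_notify() suppresses the eventfd write
     * when the driver asked not to be interrupted (NO_INTERRUPT or
     * EVENT_IDX):
     *
     *     vu_queue_push(dev, vq, elem, reply_len);
     *     free(elem);
     *     vu_queue_notify(dev, vq);
     *
     * where reply_len stands for the number of bytes the backend wrote into
     * the element's in_sg buffers.
     */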
1982
1983static inline void
1984vring_used_flags_set_bit(VuVirtq *vq, int mask)
1985{
1986    uint16_t *flags;
1987
1988    flags = (uint16_t *)((char *)vq->vring.used +
1989                         offsetof(struct vring_used, flags));
1990    *flags |= mask;
1991}
1992
1993static inline void
1994vring_used_flags_unset_bit(VuVirtq *vq, int mask)
1995{
1996    uint16_t *flags;
1997
1998    flags = (uint16_t *)((char *)vq->vring.used +
1999                         offsetof(struct vring_used, flags));
2000    *flags &= ~mask;
2001}
2002
2003static inline void
2004vring_set_avail_event(VuVirtq *vq, uint16_t val)
2005{
2006    if (!vq->notification) {
2007        return;
2008    }
2009
2010    *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val;
2011}
2012
2013void
2014vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
2015{
2016    vq->notification = enable;
2017    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2018        vring_set_avail_event(vq, vring_avail_idx(vq));
2019    } else if (enable) {
2020        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
2021    } else {
2022        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
2023    }
2024    if (enable) {
2025        /* Expose avail event/used flags before caller checks the avail idx. */
2026        smp_mb();
2027    }
2028}
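
    /*
     * Illustrative sketch: the usual way to drain a queue without taking one
     * kick per request is to disable notifications while popping, then
     * re-enable them and re-check for a race with the guest:
     *
     *     for (;;) {
     *         VuVirtqElement *elem;
     *
     *         vu_queue_set_notification(dev, vq, 0);
     *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
     *             ... handle and push the element ...
     *         }
     *         vu_queue_set_notification(dev, vq, 1);
     *         if (vu_queue_empty(dev, vq)) {
     *             break;
     *         }
     *     }
     */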
2029
2030static void
2031virtqueue_map_desc(VuDev *dev,
2032                   unsigned int *p_num_sg, struct iovec *iov,
2033                   unsigned int max_num_sg, bool is_write,
2034                   uint64_t pa, size_t sz)
2035{
2036    unsigned num_sg = *p_num_sg;
2037
2038    assert(num_sg <= max_num_sg);
2039
2040    if (!sz) {
2041        vu_panic(dev, "virtio: zero sized buffers are not allowed");
2042        return;
2043    }
2044
2045    while (sz) {
2046        uint64_t len = sz;
2047
2048        if (num_sg == max_num_sg) {
2049            vu_panic(dev, "virtio: too many descriptors in indirect table");
2050            return;
2051        }
2052
2053        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
2054        if (iov[num_sg].iov_base == NULL) {
2055            vu_panic(dev, "virtio: invalid address for buffers");
2056            return;
2057        }
2058        iov[num_sg].iov_len = len;
2059        num_sg++;
2060        sz -= len;
2061        pa += len;
2062    }
2063
2064    *p_num_sg = num_sg;
2065}
2066
2067static void *
2068virtqueue_alloc_element(size_t sz,
2069                        unsigned out_num, unsigned in_num)
2070{
2071    VuVirtqElement *elem;
2072    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
2073    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
2074    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
2075
2076    assert(sz >= sizeof(VuVirtqElement));
2077    elem = malloc(out_sg_end);
        if (!elem) {
            DPRINT("%s: failed to malloc virtqueue element\n", __func__);
            return NULL;
        }
2078    elem->out_num = out_num;
2079    elem->in_num = in_num;
2080    elem->in_sg = (void *)elem + in_sg_ofs;
2081    elem->out_sg = (void *)elem + out_sg_ofs;
2082    return elem;
2083}
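
    /*
     * The element is a single allocation laid out as:
     *
     *     [ caller's struct, sz bytes, starting with a VuVirtqElement ]
     *     [ in_sg[in_num]   (struct iovec, suitably aligned)          ]
     *     [ out_sg[out_num] (struct iovec)                            ]
     *
     * Passing sz > sizeof(VuVirtqElement) lets a backend embed the element as
     * the first member of its own request structure and release everything
     * with a single free().
     */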
2084
2085static void *
2086vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
2087{
2088    struct vring_desc *desc = vq->vring.desc;
2089    uint64_t desc_addr, read_len;
2090    unsigned int desc_len;
2091    unsigned int max = vq->vring.num;
2092    unsigned int i = idx;
2093    VuVirtqElement *elem;
2094    unsigned int out_num = 0, in_num = 0;
2095    struct iovec iov[VIRTQUEUE_MAX_SIZE];
2096    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2097    int rc;
2098
2099    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
2100        if (desc[i].len % sizeof(struct vring_desc)) {
2101            vu_panic(dev, "Invalid size for indirect buffer table");
                return NULL;
2102        }
2103
2104        /* loop over the indirect descriptor table */
2105        desc_addr = desc[i].addr;
2106        desc_len = desc[i].len;
2107        max = desc_len / sizeof(struct vring_desc);
2108        read_len = desc_len;
2109        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2110        if (unlikely(desc && read_len != desc_len)) {
2111            /* Failed to use zero copy */
2112            desc = NULL;
2113            if (!virtqueue_read_indirect_desc(dev, desc_buf,
2114                                              desc_addr,
2115                                              desc_len)) {
2116                desc = desc_buf;
2117            }
2118        }
2119        if (!desc) {
2120            vu_panic(dev, "Invalid indirect buffer table");
2121            return NULL;
2122        }
2123        i = 0;
2124    }
2125
2126    /* Collect all the descriptors */
2127    do {
2128        if (desc[i].flags & VRING_DESC_F_WRITE) {
2129            virtqueue_map_desc(dev, &in_num, iov + out_num,
2130                               VIRTQUEUE_MAX_SIZE - out_num, true,
2131                               desc[i].addr, desc[i].len);
2132        } else {
2133            if (in_num) {
2134                vu_panic(dev, "Incorrect order for descriptors");
2135                return NULL;
2136            }
2137            virtqueue_map_desc(dev, &out_num, iov,
2138                               VIRTQUEUE_MAX_SIZE, false,
2139                               desc[i].addr, desc[i].len);
2140        }
2141
2142        /* If we've got too many, that implies a descriptor loop. */
2143        if ((in_num + out_num) > max) {
2144            vu_panic(dev, "Looped descriptor");
                return NULL;
2145        }
2146        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
2147    } while (rc == VIRTQUEUE_READ_DESC_MORE);
2148
2149    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
2150        vu_panic(dev, "read descriptor error");
2151        return NULL;
2152    }
2153
2154    /* Now copy what we have collected and mapped */
2155    elem = virtqueue_alloc_element(sz, out_num, in_num);
        if (!elem) {
            return NULL;
        }
2156    elem->index = idx;
2157    for (i = 0; i < out_num; i++) {
2158        elem->out_sg[i] = iov[i];
2159    }
2160    for (i = 0; i < in_num; i++) {
2161        elem->in_sg[i] = iov[out_num + i];
2162    }
2163
2164    return elem;
2165}
2166
2167static int
2168vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
2169{
2170    if (!has_feature(dev->protocol_features,
2171        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2172        return 0;
2173    }
2174
2175    if (unlikely(!vq->inflight)) {
2176        return -1;
2177    }
2178
2179    vq->inflight->desc[desc_idx].counter = vq->counter++;
2180    vq->inflight->desc[desc_idx].inflight = 1;
2181
2182    return 0;
2183}
2184
2185static int
2186vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2187{
2188    if (!has_feature(dev->protocol_features,
2189        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2190        return 0;
2191    }
2192
2193    if (unlikely(!vq->inflight)) {
2194        return -1;
2195    }
2196
2197    vq->inflight->last_batch_head = desc_idx;
2198
2199    return 0;
2200}
2201
2202static int
2203vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2204{
2205    if (!has_feature(dev->protocol_features,
2206        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2207        return 0;
2208    }
2209
2210    if (unlikely(!vq->inflight)) {
2211        return -1;
2212    }
2213
2214    barrier();
2215
2216    vq->inflight->desc[desc_idx].inflight = 0;
2217
2218    barrier();
2219
2220    vq->inflight->used_idx = vq->used_idx;
2221
2222    return 0;
2223}
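
    /*
     * The three inflight helpers above implement the
     * VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD bookkeeping in the shared inflight
     * region: vu_queue_inflight_get() marks a descriptor busy when it is
     * popped, vu_queue_inflight_pre_put() records the head about to be
     * completed, and vu_queue_inflight_post_put() clears the busy flag and
     * records used_idx once the used ring has been updated.  The barriers
     * keep those stores ordered so that a restarted backend can find and
     * resubmit the requests that were still in flight (see the
     * resubmit_list path in vu_queue_pop() below).
     */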
2224
2225void *
2226vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
2227{
2228    int i;
2229    unsigned int head;
2230    VuVirtqElement *elem;
2231
2232    if (unlikely(dev->broken) ||
2233        unlikely(!vq->vring.avail)) {
2234        return NULL;
2235    }
2236
2237    if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
2238        i = (--vq->resubmit_num);
2239        elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);
2240
2241        if (!vq->resubmit_num) {
2242            free(vq->resubmit_list);
2243            vq->resubmit_list = NULL;
2244        }
2245
2246        return elem;
2247    }
2248
2249    if (vu_queue_empty(dev, vq)) {
2250        return NULL;
2251    }
2252    /*
2253     * Needed after vu_queue_empty(), see comment in
2254     * virtqueue_num_heads().
2255     */
2256    smp_rmb();
2257
2258    if (vq->inuse >= vq->vring.num) {
2259        vu_panic(dev, "Virtqueue size exceeded");
2260        return NULL;
2261    }
2262
2263    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
2264        return NULL;
2265    }
2266
2267    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2268        vring_set_avail_event(vq, vq->last_avail_idx);
2269    }
2270
2271    elem = vu_queue_map_desc(dev, vq, head, sz);
2272
2273    if (!elem) {
2274        return NULL;
2275    }
2276
2277    vq->inuse++;
2278
2279    vu_queue_inflight_get(dev, vq, head);
2280
2281    return elem;
2282}
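
    /*
     * Usage sketch (illustrative): a typical backend processing loop, run
     * when the queue's kick fd fires.  process_request() is a hypothetical
     * backend helper, not part of this library:
     *
     *     VuVirtq *vq = vu_get_queue(dev, qidx);
     *     VuVirtqElement *elem;
     *
     *     while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
     *         unsigned int written = process_request(elem->out_sg, elem->out_num,
     *                                                elem->in_sg, elem->in_num);
     *         vu_queue_push(dev, vq, elem, written);
     *         free(elem);
     *     }
     *     vu_queue_notify(dev, vq);
     */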
2283
2284static void
2285vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2286                        size_t len)
2287{
2288    vq->inuse--;
2289    /* unmap, when DMA support is added */
2290}
2291
2292void
2293vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2294               size_t len)
2295{
2296    vq->last_avail_idx--;
2297    vu_queue_detach_element(dev, vq, elem, len);
2298}
2299
2300bool
2301vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
2302{
2303    if (num > vq->inuse) {
2304        return false;
2305    }
2306    vq->last_avail_idx -= num;
2307    vq->inuse -= num;
2308    return true;
2309}
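
    /*
     * vu_queue_unpop() and vu_queue_rewind() hand elements back to the guest
     * unprocessed, e.g. when the backend hits a transient resource limit.
     * Illustrative sketch (backend_busy() is hypothetical):
     *
     *     elem = vu_queue_pop(dev, vq, sizeof(*elem));
     *     if (elem && backend_busy()) {
     *         vu_queue_unpop(dev, vq, elem, 0);
     *         free(elem);
     *     }
     */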
2310
2311static inline
2312void vring_used_write(VuDev *dev, VuVirtq *vq,
2313                      struct vring_used_elem *uelem, int i)
2314{
2315    struct vring_used *used = vq->vring.used;
2316
2317    used->ring[i] = *uelem;
2318    vu_log_write(dev, vq->vring.log_guest_addr +
2319                 offsetof(struct vring_used, ring[i]),
2320                 sizeof(used->ring[i]));
2321}
2322
2323
2324static void
2325vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
2326                  const VuVirtqElement *elem,
2327                  unsigned int len)
2328{
2329    struct vring_desc *desc = vq->vring.desc;
2330    unsigned int i, max, min, desc_len;
2331    uint64_t desc_addr, read_len;
2332    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2333    unsigned num_bufs = 0;
2334
2335    max = vq->vring.num;
2336    i = elem->index;
2337
2338    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
2339        if (desc[i].len % sizeof(struct vring_desc)) {
2340            vu_panic(dev, "Invalid size for indirect buffer table");
                return;
2341        }
2342
2343        /* loop over the indirect descriptor table */
2344        desc_addr = desc[i].addr;
2345        desc_len = desc[i].len;
2346        max = desc_len / sizeof(struct vring_desc);
2347        read_len = desc_len;
2348        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2349        if (unlikely(desc && read_len != desc_len)) {
2350            /* Failed to use zero copy */
2351            desc = NULL;
2352            if (!virtqueue_read_indirect_desc(dev, desc_buf,
2353                                              desc_addr,
2354                                              desc_len)) {
2355                desc = desc_buf;
2356            }
2357        }
2358        if (!desc) {
2359            vu_panic(dev, "Invalid indirect buffer table");
2360            return;
2361        }
2362        i = 0;
2363    }
2364
2365    do {
2366        if (++num_bufs > max) {
2367            vu_panic(dev, "Looped descriptor");
2368            return;
2369        }
2370
2371        if (desc[i].flags & VRING_DESC_F_WRITE) {
2372            min = MIN(desc[i].len, len);
2373            vu_log_write(dev, desc[i].addr, min);
2374            len -= min;
2375        }
2376
2377    } while (len > 0 &&
2378             (virtqueue_read_next_desc(dev, desc, i, max, &i)
2379              == VIRTQUEUE_READ_DESC_MORE));
2380}
2381
2382void
2383vu_queue_fill(VuDev *dev, VuVirtq *vq,
2384              const VuVirtqElement *elem,
2385              unsigned int len, unsigned int idx)
2386{
2387    struct vring_used_elem uelem;
2388
2389    if (unlikely(dev->broken) ||
2390        unlikely(!vq->vring.avail)) {
2391        return;
2392    }
2393
2394    vu_log_queue_fill(dev, vq, elem, len);
2395
2396    idx = (idx + vq->used_idx) % vq->vring.num;
2397
2398    uelem.id = elem->index;
2399    uelem.len = len;
2400    vring_used_write(dev, vq, &uelem, idx);
2401}
2402
2403static inline
2404void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
2405{
2406    vq->vring.used->idx = val;
2407    vu_log_write(dev,
2408                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
2409                 sizeof(vq->vring.used->idx));
2410
2411    vq->used_idx = val;
2412}
2413
2414void
2415vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
2416{
2417    uint16_t old, new;
2418
2419    if (unlikely(dev->broken) ||
2420        unlikely(!vq->vring.avail)) {
2421        return;
2422    }
2423
2424    /* Make sure buffer is written before we update index. */
2425    smp_wmb();
2426
2427    old = vq->used_idx;
2428    new = old + count;
2429    vring_used_idx_set(dev, vq, new);
2430    vq->inuse -= count;
2431    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
2432        vq->signalled_used_valid = false;
2433    }
2434}
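
    /*
     * Illustrative sketch: vu_queue_fill() and vu_queue_flush() can also be
     * used directly to batch completions, publishing them with a single
     * used-index update; vu_queue_push() below is the single-element case.
     * elems[] and lens[] stand for the completed elements and the byte
     * counts written into their in_sg buffers:
     *
     *     for (i = 0; i < n; i++) {
     *         vu_queue_fill(dev, vq, elems[i], lens[i], i);
     *     }
     *     vu_queue_flush(dev, vq, n);
     *     vu_queue_notify(dev, vq);
     */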
2435
2436void
2437vu_queue_push(VuDev *dev, VuVirtq *vq,
2438              const VuVirtqElement *elem, unsigned int len)
2439{
2440    vu_queue_fill(dev, vq, elem, len, 0);
2441    vu_queue_inflight_pre_put(dev, vq, elem->index);
2442    vu_queue_flush(dev, vq, 1);
2443    vu_queue_inflight_post_put(dev, vq, elem->index);
2444}
2445