qemu/contrib/libvhost-user/libvhost-user.c
   1/*
   2 * Vhost User library
   3 *
   4 * Copyright IBM, Corp. 2007
   5 * Copyright (c) 2016 Red Hat, Inc.
   6 *
   7 * Authors:
   8 *  Anthony Liguori <aliguori@us.ibm.com>
   9 *  Marc-André Lureau <mlureau@redhat.com>
  10 *  Victor Kaplansky <victork@redhat.com>
  11 *
  12 * This work is licensed under the terms of the GNU GPL, version 2 or
  13 * later.  See the COPYING file in the top-level directory.
  14 */
  15
  16/* this code avoids GLib dependency */
  17#include <stdlib.h>
  18#include <stdio.h>
  19#include <unistd.h>
  20#include <stdarg.h>
  21#include <errno.h>
  22#include <string.h>
  23#include <assert.h>
  24#include <inttypes.h>
  25#include <sys/types.h>
  26#include <sys/socket.h>
  27#include <sys/eventfd.h>
  28#include <sys/mman.h>
  29#include "qemu/compiler.h"
  30
  31#if defined(__linux__)
  32#include <sys/syscall.h>
  33#include <fcntl.h>
  34#include <sys/ioctl.h>
  35#include <linux/vhost.h>
  36
  37#ifdef __NR_userfaultfd
  38#include <linux/userfaultfd.h>
  39#endif
  40
  41#endif
  42
  43#include "qemu/atomic.h"
  44#include "qemu/osdep.h"
  45#include "qemu/memfd.h"
  46
  47#include "libvhost-user.h"
  48
  49/* usually provided by GLib */
  50#ifndef MIN
  51#define MIN(x, y) ({                            \
  52            typeof(x) _min1 = (x);              \
  53            typeof(y) _min2 = (y);              \
  54            (void) (&_min1 == &_min2);          \
  55            _min1 < _min2 ? _min1 : _min2; })
  56#endif
  57
  58/* Round number down to multiple */
  59#define ALIGN_DOWN(n, m) ((n) / (m) * (m))
  60
  61/* Round number up to multiple */
  62#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
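/* For example, ALIGN_DOWN(10, 8) == 8 and ALIGN_UP(10, 8) == 16;
 * values already on a boundary are unchanged: ALIGN_UP(16, 8) == 16. */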
  63
  64/* Align each region to cache line size in inflight buffer */
  65#define INFLIGHT_ALIGNMENT 64
  66
  67/* The version of inflight buffer */
  68#define INFLIGHT_VERSION 1
  69
  70#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
  71
  72/* The version of the protocol we support */
  73#define VHOST_USER_VERSION 1
  74#define LIBVHOST_USER_DEBUG 0
  75
  76#define DPRINT(...)                             \
  77    do {                                        \
  78        if (LIBVHOST_USER_DEBUG) {              \
  79            fprintf(stderr, __VA_ARGS__);        \
  80        }                                       \
  81    } while (0)
  82
  83static inline
  84bool has_feature(uint64_t features, unsigned int fbit)
  85{
  86    assert(fbit < 64);
  87    return !!(features & (1ULL << fbit));
  88}
  89
  90static inline
  91bool vu_has_feature(VuDev *dev,
  92                    unsigned int fbit)
  93{
  94    return has_feature(dev->features, fbit);
  95}
  96
  97static const char *
  98vu_request_to_string(unsigned int req)
  99{
 100#define REQ(req) [req] = #req
 101    static const char *vu_request_str[] = {
 102        REQ(VHOST_USER_NONE),
 103        REQ(VHOST_USER_GET_FEATURES),
 104        REQ(VHOST_USER_SET_FEATURES),
 105        REQ(VHOST_USER_SET_OWNER),
 106        REQ(VHOST_USER_RESET_OWNER),
 107        REQ(VHOST_USER_SET_MEM_TABLE),
 108        REQ(VHOST_USER_SET_LOG_BASE),
 109        REQ(VHOST_USER_SET_LOG_FD),
 110        REQ(VHOST_USER_SET_VRING_NUM),
 111        REQ(VHOST_USER_SET_VRING_ADDR),
 112        REQ(VHOST_USER_SET_VRING_BASE),
 113        REQ(VHOST_USER_GET_VRING_BASE),
 114        REQ(VHOST_USER_SET_VRING_KICK),
 115        REQ(VHOST_USER_SET_VRING_CALL),
 116        REQ(VHOST_USER_SET_VRING_ERR),
 117        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
 118        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
 119        REQ(VHOST_USER_GET_QUEUE_NUM),
 120        REQ(VHOST_USER_SET_VRING_ENABLE),
 121        REQ(VHOST_USER_SEND_RARP),
 122        REQ(VHOST_USER_NET_SET_MTU),
 123        REQ(VHOST_USER_SET_SLAVE_REQ_FD),
 124        REQ(VHOST_USER_IOTLB_MSG),
 125        REQ(VHOST_USER_SET_VRING_ENDIAN),
 126        REQ(VHOST_USER_GET_CONFIG),
 127        REQ(VHOST_USER_SET_CONFIG),
 128        REQ(VHOST_USER_POSTCOPY_ADVISE),
 129        REQ(VHOST_USER_POSTCOPY_LISTEN),
 130        REQ(VHOST_USER_POSTCOPY_END),
 131        REQ(VHOST_USER_GET_INFLIGHT_FD),
 132        REQ(VHOST_USER_SET_INFLIGHT_FD),
 133        REQ(VHOST_USER_GPU_SET_SOCKET),
 134        REQ(VHOST_USER_MAX),
 135    };
 136#undef REQ
 137
 138    if (req < VHOST_USER_MAX) {
 139        return vu_request_str[req];
 140    } else {
 141        return "unknown";
 142    }
 143}
 144
 145static void
 146vu_panic(VuDev *dev, const char *msg, ...)
 147{
 148    char *buf = NULL;
 149    va_list ap;
 150
 151    va_start(ap, msg);
 152    if (vasprintf(&buf, msg, ap) < 0) {
 153        buf = NULL;
 154    }
 155    va_end(ap);
 156
 157    dev->broken = true;
 158    dev->panic(dev, buf);
 159    free(buf);
 160
 161    /* FIXME: find a way to call virtio_error? */
 162}
 163
 164/* Translate guest physical address to our virtual address.  */
 165void *
 166vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
 167{
 168    int i;
 169
 170    if (*plen == 0) {
 171        return NULL;
 172    }
 173
 174    /* Find matching memory region.  */
 175    for (i = 0; i < dev->nregions; i++) {
 176        VuDevRegion *r = &dev->regions[i];
 177
 178        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
 179            if ((guest_addr + *plen) > (r->gpa + r->size)) {
 180                *plen = r->gpa + r->size - guest_addr;
 181            }
 182            return (void *)(uintptr_t)
 183                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
 184        }
 185    }
 186
 187    return NULL;
 188}
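
/* Note: on success, *plen is clamped to the bytes remaining in the
 * matched region, so a caller translating a range that may span
 * regions must loop, advancing guest_addr by the returned length each
 * time (virtqueue_read_indirect_desc() below is one such loop). */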
 189
 190/* Translate qemu virtual address to our virtual address.  */
 191static void *
 192qva_to_va(VuDev *dev, uint64_t qemu_addr)
 193{
 194    int i;
 195
 196    /* Find matching memory region.  */
 197    for (i = 0; i < dev->nregions; i++) {
 198        VuDevRegion *r = &dev->regions[i];
 199
 200        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
 201            return (void *)(uintptr_t)
 202                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
 203        }
 204    }
 205
 206    return NULL;
 207}
 208
 209static void
 210vmsg_close_fds(VhostUserMsg *vmsg)
 211{
 212    int i;
 213
 214    for (i = 0; i < vmsg->fd_num; i++) {
 215        close(vmsg->fds[i]);
 216    }
 217}
 218
 219/* Set reply payload.u64 and clear request flags and fd_num */
 220static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
 221{
 222    vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
 223    vmsg->size = sizeof(vmsg->payload.u64);
 224    vmsg->payload.u64 = val;
 225    vmsg->fd_num = 0;
 226}
 227
 228/* A test to see if we have userfault available */
 229static bool
 230have_userfault(void)
 231{
 232#if defined(__linux__) && defined(__NR_userfaultfd) && \
 233        defined(UFFD_FEATURE_MISSING_SHMEM) && \
 234        defined(UFFD_FEATURE_MISSING_HUGETLBFS)
 235    /* Now test the kernel we're running on really has the features */
 236    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 237    struct uffdio_api api_struct;
 238    if (ufd < 0) {
 239        return false;
 240    }
 241
 242    api_struct.api = UFFD_API;
 243    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
 244                          UFFD_FEATURE_MISSING_HUGETLBFS;
 245    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
 246        close(ufd);
 247        return false;
 248    }
 249    close(ufd);
 250    return true;
 251
 252#else
 253    return false;
 254#endif
 255}
 256
 257static bool
 258vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 259{
 260    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
 261    struct iovec iov = {
 262        .iov_base = (char *)vmsg,
 263        .iov_len = VHOST_USER_HDR_SIZE,
 264    };
 265    struct msghdr msg = {
 266        .msg_iov = &iov,
 267        .msg_iovlen = 1,
 268        .msg_control = control,
 269        .msg_controllen = sizeof(control),
 270    };
 271    size_t fd_size;
 272    struct cmsghdr *cmsg;
 273    int rc;
 274
 275    do {
 276        rc = recvmsg(conn_fd, &msg, 0);
 277    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 278
 279    if (rc < 0) {
 280        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
 281        return false;
 282    }
 283
 284    vmsg->fd_num = 0;
 285    for (cmsg = CMSG_FIRSTHDR(&msg);
 286         cmsg != NULL;
 287         cmsg = CMSG_NXTHDR(&msg, cmsg))
 288    {
 289        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
 290            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
 291            vmsg->fd_num = fd_size / sizeof(int);
 292            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
 293            break;
 294        }
 295    }
 296
 297    if (vmsg->size > sizeof(vmsg->payload)) {
 298        vu_panic(dev,
 299                 "Error: oversized message: request %d, size %u, "
 300                 "but sizeof(vmsg->payload) = %zu\n",
 301                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
 302        goto fail;
 303    }
 304
 305    if (vmsg->size) {
 306        do {
 307            rc = read(conn_fd, &vmsg->payload, vmsg->size);
 308        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 309
 310        if (rc <= 0) {
 311            vu_panic(dev, "Error while reading: %s", strerror(errno));
 312            goto fail;
 313        }
 314
 315        assert(rc == vmsg->size);
 316    }
 317
 318    return true;
 319
 320fail:
 321    vmsg_close_fds(vmsg);
 322
 323    return false;
 324}
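
/* Wire-format recap: every message starts with the fixed-size header
 * (request, flags, size -- VHOST_USER_HDR_SIZE bytes) read above, and
 * `size` payload bytes follow.  File descriptors arrive alongside the
 * header as SCM_RIGHTS ancillary data, which is why recvmsg() is
 * needed for the header while a plain read() suffices for the
 * payload. */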
 325
 326static bool
 327vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 328{
 329    int rc;
 330    uint8_t *p = (uint8_t *)vmsg;
 331    char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
 332    struct iovec iov = {
 333        .iov_base = (char *)vmsg,
 334        .iov_len = VHOST_USER_HDR_SIZE,
 335    };
 336    struct msghdr msg = {
 337        .msg_iov = &iov,
 338        .msg_iovlen = 1,
 339        .msg_control = control,
 340    };
 341    struct cmsghdr *cmsg;
 342
 343    memset(control, 0, sizeof(control));
 344    assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS);
 345    if (vmsg->fd_num > 0) {
 346        size_t fdsize = vmsg->fd_num * sizeof(int);
 347        msg.msg_controllen = CMSG_SPACE(fdsize);
 348        cmsg = CMSG_FIRSTHDR(&msg);
 349        cmsg->cmsg_len = CMSG_LEN(fdsize);
 350        cmsg->cmsg_level = SOL_SOCKET;
 351        cmsg->cmsg_type = SCM_RIGHTS;
 352        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
 353    } else {
 354        msg.msg_controllen = 0;
 355    }
 356
 357    do {
 358        rc = sendmsg(conn_fd, &msg, 0);
 359    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 360
 361    if (vmsg->size) {
 362        do {
 363            if (vmsg->data) {
 364                rc = write(conn_fd, vmsg->data, vmsg->size);
 365            } else {
 366                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
 367            }
 368        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
 369    }
 370
 371    if (rc <= 0) {
 372        vu_panic(dev, "Error while writing: %s", strerror(errno));
 373        return false;
 374    }
 375
 376    return true;
 377}
 378
 379static bool
 380vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
 381{
 382    /* Set the version in the flags when sending the reply */
 383    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
 384    vmsg->flags |= VHOST_USER_VERSION;
 385    vmsg->flags |= VHOST_USER_REPLY_MASK;
 386
 387    return vu_message_write(dev, conn_fd, vmsg);
 388}
 389
 390static bool
 391vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
 392{
 393    VhostUserMsg msg_reply;
 394
 395    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
 396        return true;
 397    }
 398
 399    if (!vu_message_read(dev, dev->slave_fd, &msg_reply)) {
 400        return false;
 401    }
 402
 403    if (msg_reply.request != vmsg->request) {
 404        DPRINT("Received unexpected msg type. Expected %d received %d",
 405               vmsg->request, msg_reply.request);
 406        return false;
 407    }
 408
 409    return msg_reply.payload.u64 == 0;
 410}
 411
 412/* Kick the log_call_fd if required. */
 413static void
 414vu_log_kick(VuDev *dev)
 415{
 416    if (dev->log_call_fd != -1) {
 417        DPRINT("Kicking the QEMU's log...\n");
 418        if (eventfd_write(dev->log_call_fd, 1) < 0) {
 419            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
 420        }
 421    }
 422}
 423
 424static void
 425vu_log_page(uint8_t *log_table, uint64_t page)
 426{
 427    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
 428    atomic_or(&log_table[page / 8], 1 << (page % 8));
 429}
 430
 431static void
 432vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
 433{
 434    uint64_t page;
 435
 436    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
 437        !dev->log_table || !length) {
 438        return;
 439    }
 440
 441    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));
 442
 443    page = address / VHOST_LOG_PAGE;
 444    while (page * VHOST_LOG_PAGE < address + length) {
 445        vu_log_page(dev->log_table, page);
 446        page += 1;
 447    }
 448
 449    vu_log_kick(dev);
 450}
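
/* Worked example, assuming the usual 4096-byte VHOST_LOG_PAGE: a
 * write to guest physical address 0x3000 dirties page 3, which
 * vu_log_page() records as bit 3 of byte 0 in the log table. */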
 451
 452static void
 453vu_kick_cb(VuDev *dev, int condition, void *data)
 454{
 455    int index = (intptr_t)data;
 456    VuVirtq *vq = &dev->vq[index];
 457    int sock = vq->kick_fd;
 458    eventfd_t kick_data;
 459    ssize_t rc;
 460
 461    rc = eventfd_read(sock, &kick_data);
 462    if (rc == -1) {
 463        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
 464        dev->remove_watch(dev, dev->vq[index].kick_fd);
 465    } else {
 466        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
 467               kick_data, vq->handler, index);
 468        if (vq->handler) {
 469            vq->handler(dev, index);
 470        }
 471    }
 472}
 473
 474static bool
 475vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 476{
 477    vmsg->payload.u64 =
 478        1ULL << VHOST_F_LOG_ALL |
 479        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
 480
 481    if (dev->iface->get_features) {
 482        vmsg->payload.u64 |= dev->iface->get_features(dev);
 483    }
 484
 485    vmsg->size = sizeof(vmsg->payload.u64);
 486    vmsg->fd_num = 0;
 487
 488    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
 489
 490    return true;
 491}
 492
 493static void
 494vu_set_enable_all_rings(VuDev *dev, bool enabled)
 495{
 496    uint16_t i;
 497
 498    for (i = 0; i < dev->max_queues; i++) {
 499        dev->vq[i].enable = enabled;
 500    }
 501}
 502
 503static bool
 504vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
 505{
 506    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
 507
 508    dev->features = vmsg->payload.u64;
 509
 510    if (!has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
 511        vu_set_enable_all_rings(dev, true);
 512    }
 513
 514    if (dev->iface->set_features) {
 515        dev->iface->set_features(dev, dev->features);
 516    }
 517
 518    return false;
 519}
 520
 521static bool
 522vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
 523{
 524    return false;
 525}
 526
 527static void
 528vu_close_log(VuDev *dev)
 529{
 530    if (dev->log_table) {
 531        if (munmap(dev->log_table, dev->log_size) != 0) {
 532            perror("close log munmap() error");
 533        }
 534
 535        dev->log_table = NULL;
 536    }
 537    if (dev->log_call_fd != -1) {
 538        close(dev->log_call_fd);
 539        dev->log_call_fd = -1;
 540    }
 541}
 542
 543static bool
 544vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
 545{
 546    vu_set_enable_all_rings(dev, false);
 547
 548    return false;
 549}
 550
 551static bool
 552vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 553{
 554    int i;
 555    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
 556    dev->nregions = memory->nregions;
 557
 558    DPRINT("Nregions: %d\n", memory->nregions);
 559    for (i = 0; i < dev->nregions; i++) {
 560        void *mmap_addr;
 561        VhostUserMemoryRegion *msg_region = &memory->regions[i];
 562        VuDevRegion *dev_region = &dev->regions[i];
 563
 564        DPRINT("Region %d\n", i);
 565        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
 566               msg_region->guest_phys_addr);
 567        DPRINT("    memory_size:     0x%016"PRIx64"\n",
 568               msg_region->memory_size);
 569        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
 570               msg_region->userspace_addr);
 571        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
 572               msg_region->mmap_offset);
 573
 574        dev_region->gpa = msg_region->guest_phys_addr;
 575        dev_region->size = msg_region->memory_size;
 576        dev_region->qva = msg_region->userspace_addr;
 577        dev_region->mmap_offset = msg_region->mmap_offset;
 578
 579        /* We don't use the offset argument of mmap() since the
 580         * mapped address has to be page aligned, and we use huge
 581         * pages.
 582         * In postcopy we're using PROT_NONE here to catch anyone
 583         * accessing it before we userfault.
 584         */
 585        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
 586                         PROT_NONE, MAP_SHARED,
 587                         vmsg->fds[i], 0);
 588
 589        if (mmap_addr == MAP_FAILED) {
 590            vu_panic(dev, "region mmap error: %s", strerror(errno));
 591        } else {
 592            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
 593            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
 594                   dev_region->mmap_addr);
 595        }
 596
 597        /* Return the address to QEMU so that it can translate the ufd
 598         * fault addresses back.
 599         */
 600        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
 601                                                 dev_region->mmap_offset);
 602        close(vmsg->fds[i]);
 603    }
 604
 605    /* Send the message back to qemu with the addresses filled in */
 606    vmsg->fd_num = 0;
 607    if (!vu_send_reply(dev, dev->sock, vmsg)) {
 608        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
 609        return false;
 610    }
 611
 612    /* Wait for QEMU to confirm that it's registered the handler for the
 613     * faults.
 614     */
 615    if (!vu_message_read(dev, dev->sock, vmsg) ||
 616        vmsg->size != sizeof(vmsg->payload.u64) ||
 617        vmsg->payload.u64 != 0) {
 618        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
 619        return false;
 620    }
 621
 622    /* OK, now we can go and register the memory and generate faults */
 623    for (i = 0; i < dev->nregions; i++) {
 624        VuDevRegion *dev_region = &dev->regions[i];
 625        int ret;
 626#ifdef UFFDIO_REGISTER
 627        /* We should already have an open ufd. Mark each memory
 628         * range as ufd.
 629         * Discard any mapping we have here; note I can't use MADV_REMOVE
 630         * or fallocate to make the hole since I don't want to lose
 631         * data that's already arrived in the shared process.
 632         * TODO: How to do hugepage
 633         */
 634        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
 635                      dev_region->size + dev_region->mmap_offset,
 636                      MADV_DONTNEED);
 637        if (ret) {
 638            fprintf(stderr,
 639                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
 640                    __func__, i, strerror(errno));
 641        }
 642        /* Turn off transparent hugepages so we don't lose wakeups
 643         * in neighbouring pages.
 644         * TODO: Turn this back on later.
 645         */
 646        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
 647                      dev_region->size + dev_region->mmap_offset,
 648                      MADV_NOHUGEPAGE);
 649        if (ret) {
 650            /* Note: This can happen legally on kernels that are configured
 651             * without madvise'able hugepages
 652             */
 653            fprintf(stderr,
 654                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
 655                    __func__, i, strerror(errno));
 656        }
 657        struct uffdio_register reg_struct;
 658        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
 659        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
 660        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
 661
 662        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
 663            vu_panic(dev, "%s: Failed to userfault region %d "
 664                          "@0x%" PRIx64 " + size: 0x%" PRIx64 " offset: 0x%" PRIx64 " (ufd=%d)%s\n",
 665                     __func__, i,
 666                     dev_region->mmap_addr,
 667                     dev_region->size, dev_region->mmap_offset,
 668                     dev->postcopy_ufd, strerror(errno));
 669            return false;
 670        }
 671        if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
 672            vu_panic(dev, "%s Region (%d) doesn't support COPY",
 673                     __func__, i);
 674            return false;
 675        }
 676        DPRINT("%s: region %d: Registered userfault for %"
 677               PRIx64 " + %" PRIx64 "\n", __func__, i,
 678               (uint64_t)reg_struct.range.start,
 679               (uint64_t)reg_struct.range.len);
 680        /* Now it's registered we can let the client at it */
 681        if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
 682                     dev_region->size + dev_region->mmap_offset,
 683                     PROT_READ | PROT_WRITE)) {
 684            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
 685                     i, strerror(errno));
 686            return false;
 687        }
 688        /* TODO: Stash 'zero' support flags somewhere */
 689#endif
 690    }
 691
 692    return false;
 693}
 694
 695static bool
 696vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
 697{
 698    int i;
 699    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
 700
 701    for (i = 0; i < dev->nregions; i++) {
 702        VuDevRegion *r = &dev->regions[i];
 703        void *m = (void *) (uintptr_t) r->mmap_addr;
 704
 705        if (m) {
 706            munmap(m, r->size + r->mmap_offset);
 707        }
 708    }
 709    dev->nregions = memory->nregions;
 710
 711    if (dev->postcopy_listening) {
 712        return vu_set_mem_table_exec_postcopy(dev, vmsg);
 713    }
 714
 715    DPRINT("Nregions: %d\n", memory->nregions);
 716    for (i = 0; i < dev->nregions; i++) {
 717        void *mmap_addr;
 718        VhostUserMemoryRegion *msg_region = &memory->regions[i];
 719        VuDevRegion *dev_region = &dev->regions[i];
 720
 721        DPRINT("Region %d\n", i);
 722        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
 723               msg_region->guest_phys_addr);
 724        DPRINT("    memory_size:     0x%016"PRIx64"\n",
 725               msg_region->memory_size);
 726        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
 727               msg_region->userspace_addr);
 728        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
 729               msg_region->mmap_offset);
 730
 731        dev_region->gpa = msg_region->guest_phys_addr;
 732        dev_region->size = msg_region->memory_size;
 733        dev_region->qva = msg_region->userspace_addr;
 734        dev_region->mmap_offset = msg_region->mmap_offset;
 735
 736        /* We don't use the offset argument of mmap() since the
 737         * mapped address has to be page aligned, and we use huge
 738         * pages.  */
 739        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
 740                         PROT_READ | PROT_WRITE, MAP_SHARED,
 741                         vmsg->fds[i], 0);
 742
 743        if (mmap_addr == MAP_FAILED) {
 744            vu_panic(dev, "region mmap error: %s", strerror(errno));
 745        } else {
 746            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
 747            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
 748                   dev_region->mmap_addr);
 749        }
 750
 751        close(vmsg->fds[i]);
 752    }
 753
 754    return false;
 755}
 756
 757static bool
 758vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
 759{
 760    int fd;
 761    uint64_t log_mmap_size, log_mmap_offset;
 762    void *rc;
 763
 764    if (vmsg->fd_num != 1 ||
 765        vmsg->size != sizeof(vmsg->payload.log)) {
 766        vu_panic(dev, "Invalid log_base message");
 767        return true;
 768    }
 769
 770    fd = vmsg->fds[0];
 771    log_mmap_offset = vmsg->payload.log.mmap_offset;
 772    log_mmap_size = vmsg->payload.log.mmap_size;
 773    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
 774    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);
 775
 776    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
 777              log_mmap_offset);
 778    close(fd);
 779    if (rc == MAP_FAILED) {
 780        perror("log mmap error");
 781    }
 782
 783    if (dev->log_table) {
 784        munmap(dev->log_table, dev->log_size);
 785    }
 786    dev->log_table = rc;
 787    dev->log_size = log_mmap_size;
 788
 789    vmsg->size = sizeof(vmsg->payload.u64);
 790    vmsg->fd_num = 0;
 791
 792    return true;
 793}
 794
 795static bool
 796vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
 797{
 798    if (vmsg->fd_num != 1) {
 799        vu_panic(dev, "Invalid log_fd message");
 800        return false;
 801    }
 802
 803    if (dev->log_call_fd != -1) {
 804        close(dev->log_call_fd);
 805    }
 806    dev->log_call_fd = vmsg->fds[0];
 807    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);
 808
 809    return false;
 810}
 811
 812static bool
 813vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
 814{
 815    unsigned int index = vmsg->payload.state.index;
 816    unsigned int num = vmsg->payload.state.num;
 817
 818    DPRINT("State.index: %d\n", index);
 819    DPRINT("State.num:   %d\n", num);
 820    dev->vq[index].vring.num = num;
 821
 822    return false;
 823}
 824
 825static bool
 826vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
 827{
 828    struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
 829    unsigned int index = vra->index;
 830    VuVirtq *vq = &dev->vq[index];
 831
 832    DPRINT("vhost_vring_addr:\n");
 833    DPRINT("    index:  %d\n", vra->index);
 834    DPRINT("    flags:  %d\n", vra->flags);
 835    DPRINT("    desc_user_addr:   0x%016" PRIx64 "\n", vra->desc_user_addr);
 836    DPRINT("    used_user_addr:   0x%016" PRIx64 "\n", vra->used_user_addr);
 837    DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", vra->avail_user_addr);
 838    DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", vra->log_guest_addr);
 839
 840    vq->vring.flags = vra->flags;
 841    vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
 842    vq->vring.used = qva_to_va(dev, vra->used_user_addr);
 843    vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
 844    vq->vring.log_guest_addr = vra->log_guest_addr;
 845
 846    DPRINT("Setting virtq addresses:\n");
 847    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
 848    DPRINT("    vring_used  at %p\n", vq->vring.used);
 849    DPRINT("    vring_avail at %p\n", vq->vring.avail);
 850
 851    if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
 852        vu_panic(dev, "Invalid vring_addr message");
 853        return false;
 854    }
 855
 856    vq->used_idx = vq->vring.used->idx;
 857
 858    if (vq->last_avail_idx != vq->used_idx) {
 859        bool resume = dev->iface->queue_is_processed_in_order &&
 860            dev->iface->queue_is_processed_in_order(dev, index);
 861
 862        DPRINT("Last avail index != used index: %u != %u%s\n",
 863               vq->last_avail_idx, vq->used_idx,
 864               resume ? ", resuming" : "");
 865
 866        if (resume) {
 867            vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
 868        }
 869    }
 870
 871    return false;
 872}
 873
 874static bool
 875vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
 876{
 877    unsigned int index = vmsg->payload.state.index;
 878    unsigned int num = vmsg->payload.state.num;
 879
 880    DPRINT("State.index: %d\n", index);
 881    DPRINT("State.num:   %d\n", num);
 882    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;
 883
 884    return false;
 885}
 886
 887static bool
 888vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
 889{
 890    unsigned int index = vmsg->payload.state.index;
 891
 892    DPRINT("State.index: %d\n", index);
 893    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
 894    vmsg->size = sizeof(vmsg->payload.state);
 895
 896    dev->vq[index].started = false;
 897    if (dev->iface->queue_set_started) {
 898        dev->iface->queue_set_started(dev, index, false);
 899    }
 900
 901    if (dev->vq[index].call_fd != -1) {
 902        close(dev->vq[index].call_fd);
 903        dev->vq[index].call_fd = -1;
 904    }
 905    if (dev->vq[index].kick_fd != -1) {
 906        dev->remove_watch(dev, dev->vq[index].kick_fd);
 907        close(dev->vq[index].kick_fd);
 908        dev->vq[index].kick_fd = -1;
 909    }
 910
 911    return true;
 912}
 913
 914static bool
 915vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
 916{
 917    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
 918
 919    if (index >= dev->max_queues) {
 920        vmsg_close_fds(vmsg);
 921        vu_panic(dev, "Invalid queue index: %u", index);
 922        return false;
 923    }
 924
 925    if (vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK ||
 926        vmsg->fd_num != 1) {
 927        vmsg_close_fds(vmsg);
 928        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
 929        return false;
 930    }
 931
 932    return true;
 933}
 934
 935static int
 936inflight_desc_compare(const void *a, const void *b)
 937{
 938    VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
 939                        *desc1 = (VuVirtqInflightDesc *)b;
 940
 941    if (desc1->counter > desc0->counter &&
 942        (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
 943        return 1;
 944    }
 945
 946    return -1;
 947}
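
/* Sorts inflight entries newest-first: an entry precedes another when
 * its counter is ahead by less than 2 * VIRTQUEUE_MAX_SIZE, a window
 * that keeps the order sane if the counters ever wrap.  See the
 * qsort() call in vu_check_queue_inflights() below. */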
 948
 949static int
 950vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
 951{
 952    int i = 0;
 953
 954    if (!has_feature(dev->protocol_features,
 955        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
 956        return 0;
 957    }
 958
 959    if (unlikely(!vq->inflight)) {
 960        return -1;
 961    }
 962
 963    if (unlikely(!vq->inflight->version)) {
 964        /* initialize the buffer */
 965        vq->inflight->version = INFLIGHT_VERSION;
 966        return 0;
 967    }
 968
 969    vq->used_idx = vq->vring.used->idx;
 970    vq->resubmit_num = 0;
 971    vq->resubmit_list = NULL;
 972    vq->counter = 0;
 973
 974    if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
 975        vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;
 976
 977        barrier();
 978
 979        vq->inflight->used_idx = vq->used_idx;
 980    }
 981
 982    for (i = 0; i < vq->inflight->desc_num; i++) {
 983        if (vq->inflight->desc[i].inflight == 1) {
 984            vq->inuse++;
 985        }
 986    }
 987
 988    vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;
 989
 990    if (vq->inuse) {
 991        vq->resubmit_list = malloc(sizeof(VuVirtqInflightDesc) * vq->inuse);
 992        if (!vq->resubmit_list) {
 993            return -1;
 994        }
 995
 996        for (i = 0; i < vq->inflight->desc_num; i++) {
 997            if (vq->inflight->desc[i].inflight) {
 998                vq->resubmit_list[vq->resubmit_num].index = i;
 999                vq->resubmit_list[vq->resubmit_num].counter =
1000                                        vq->inflight->desc[i].counter;
1001                vq->resubmit_num++;
1002            }
1003        }
1004
1005        if (vq->resubmit_num > 1) {
1006            qsort(vq->resubmit_list, vq->resubmit_num,
1007                  sizeof(VuVirtqInflightDesc), inflight_desc_compare);
1008        }
1009        vq->counter = vq->resubmit_list[0].counter + 1;
1010    }
1011
1012    /* in case of I/O hang after reconnecting */
1013    if (eventfd_write(vq->kick_fd, 1)) {
1014        return -1;
1015    }
1016
1017    return 0;
1018}
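
/* Recovery summary: after a reconnect, the shared inflight buffer
 * records which descriptors the previous instance fetched but never
 * completed.  The code above counts them (vq->inuse), reconstructs
 * last_avail_idx as used_idx + inuse, gathers the pending entries into
 * resubmit_list (sorted newest-first so vq->counter can resume past
 * the highest counter seen), and finally kicks the queue in case I/O
 * was hung when the connection dropped. */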
1019
1020static bool
1021vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
1022{
1023    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1024
1025    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1026
1027    if (!vu_check_queue_msg_file(dev, vmsg)) {
1028        return false;
1029    }
1030
1031    if (dev->vq[index].kick_fd != -1) {
1032        dev->remove_watch(dev, dev->vq[index].kick_fd);
1033        close(dev->vq[index].kick_fd);
1034        dev->vq[index].kick_fd = -1;
1035    }
1036
1037    dev->vq[index].kick_fd = vmsg->fds[0];
1038    DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);
1039
1040    dev->vq[index].started = true;
1041    if (dev->iface->queue_set_started) {
1042        dev->iface->queue_set_started(dev, index, true);
1043    }
1044
1045    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
1046        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
1047                       vu_kick_cb, (void *)(long)index);
1048
1049        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
1050               dev->vq[index].kick_fd, index);
1051    }
1052
1053    if (vu_check_queue_inflights(dev, &dev->vq[index])) {
1054        vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
1055    }
1056
1057    return false;
1058}
1059
1060void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
1061                          vu_queue_handler_cb handler)
1062{
1063    int qidx = vq - dev->vq;
1064
1065    vq->handler = handler;
1066    if (vq->kick_fd >= 0) {
1067        if (handler) {
1068            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
1069                           vu_kick_cb, (void *)(long)qidx);
1070        } else {
1071            dev->remove_watch(dev, vq->kick_fd);
1072        }
1073    }
1074}
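
/* Illustrative use from a device backend (the handler name below is
 * hypothetical):
 *
 *     static void rx_handler(VuDev *dev, int qidx)
 *     {
 *         ...pop and process available buffers on queue qidx...
 *     }
 *
 *     vu_set_queue_handler(dev, vu_get_queue(dev, 0), rx_handler);
 */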
1075
1076bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
1077                                int size, int offset)
1078{
1079    int qidx = vq - dev->vq;
1080    int fd_num = 0;
1081    VhostUserMsg vmsg = {
1082        .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
1083        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1084        .size = sizeof(vmsg.payload.area),
1085        .payload.area = {
1086            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
1087            .size = size,
1088            .offset = offset,
1089        },
1090    };
1091
1092    if (fd == -1) {
1093        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
1094    } else {
1095        vmsg.fds[fd_num++] = fd;
1096    }
1097
1098    vmsg.fd_num = fd_num;
1099
1100    if (!has_feature(dev->protocol_features,
1101                     VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
1102        return false;
1103    }
1104
1105    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
1106        return false;
1107    }
1108
1109    return vu_process_message_reply(dev, &vmsg);
1110}
1111
1112static bool
1113vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
1114{
1115    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1116
1117    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1118
1119    if (!vu_check_queue_msg_file(dev, vmsg)) {
1120        return false;
1121    }
1122
1123    if (dev->vq[index].call_fd != -1) {
1124        close(dev->vq[index].call_fd);
1125        dev->vq[index].call_fd = -1;
1126    }
1127
1128    dev->vq[index].call_fd = vmsg->fds[0];
1129
1130    /* in case of I/O hang after reconnecting */
1131    if (eventfd_write(vmsg->fds[0], 1)) {
1132        return false;
1133    }
1134
1135    DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);
1136
1137    return false;
1138}
1139
1140static bool
1141vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
1142{
1143    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1144
1145    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1146
1147    if (!vu_check_queue_msg_file(dev, vmsg)) {
1148        return false;
1149    }
1150
1151    if (dev->vq[index].err_fd != -1) {
1152        close(dev->vq[index].err_fd);
1153        dev->vq[index].err_fd = -1;
1154    }
1155
1156    dev->vq[index].err_fd = vmsg->fds[0];
1157
1158    return false;
1159}
1160
1161static bool
1162vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1163{
1164    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
1165                        1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
1166                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
1167                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
1168                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD;
1169
1170    if (have_userfault()) {
1171        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
1172    }
1173
1174    if (dev->iface->get_config && dev->iface->set_config) {
1175        features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
1176    }
1177
1178    if (dev->iface->get_protocol_features) {
1179        features |= dev->iface->get_protocol_features(dev);
1180    }
1181
1182    vmsg_set_reply_u64(vmsg, features);
1183    return true;
1184}
1185
1186static bool
1187vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1188{
1189    uint64_t features = vmsg->payload.u64;
1190
1191    DPRINT("u64: 0x%016"PRIx64"\n", features);
1192
1193    dev->protocol_features = vmsg->payload.u64;
1194
1195    if (dev->iface->set_protocol_features) {
1196        dev->iface->set_protocol_features(dev, features);
1197    }
1198
1199    return false;
1200}
1201
1202static bool
1203vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
1204{
1205    vmsg_set_reply_u64(vmsg, dev->max_queues);
1206    return true;
1207}
1208
1209static bool
1210vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
1211{
1212    unsigned int index = vmsg->payload.state.index;
1213    unsigned int enable = vmsg->payload.state.num;
1214
1215    DPRINT("State.index: %d\n", index);
1216    DPRINT("State.enable:   %d\n", enable);
1217
1218    if (index >= dev->max_queues) {
1219        vu_panic(dev, "Invalid vring_enable index: %u", index);
1220        return false;
1221    }
1222
1223    dev->vq[index].enable = enable;
1224    return false;
1225}
1226
1227static bool
1228vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
1229{
1230    if (vmsg->fd_num != 1) {
1231        vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
1232        return false;
1233    }
1234
1235    if (dev->slave_fd != -1) {
1236        close(dev->slave_fd);
1237    }
1238    dev->slave_fd = vmsg->fds[0];
1239    DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);
1240
1241    return false;
1242}
1243
1244static bool
1245vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
1246{
1247    int ret = -1;
1248
1249    if (dev->iface->get_config) {
1250        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
1251                                     vmsg->payload.config.size);
1252    }
1253
1254    if (ret) {
1255        /* resize to zero to indicate an error to master */
1256        vmsg->size = 0;
1257    }
1258
1259    return true;
1260}
1261
1262static bool
1263vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
1264{
1265    int ret = -1;
1266
1267    if (dev->iface->set_config) {
1268        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
1269                                     vmsg->payload.config.offset,
1270                                     vmsg->payload.config.size,
1271                                     vmsg->payload.config.flags);
1272        if (ret) {
1273            vu_panic(dev, "Set virtio configuration space failed");
1274        }
1275    }
1276
1277    return false;
1278}
1279
1280static bool
1281vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
1282{
1283    dev->postcopy_ufd = -1;
1284#ifdef UFFDIO_API
1285    struct uffdio_api api_struct;
1286
1287    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
1288    vmsg->size = 0;
1289#endif
1290
1291    if (dev->postcopy_ufd == -1) {
1292        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
1293        goto out;
1294    }
1295
1296#ifdef UFFDIO_API
1297    api_struct.api = UFFD_API;
1298    api_struct.features = 0;
1299    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
1300        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
1301        close(dev->postcopy_ufd);
1302        dev->postcopy_ufd = -1;
1303        goto out;
1304    }
1305    /* TODO: Stash feature flags somewhere */
1306#endif
1307
1308out:
1309    /* Return a ufd to the QEMU */
1310    vmsg->fd_num = 1;
1311    vmsg->fds[0] = dev->postcopy_ufd;
1312    return true; /* = send a reply */
1313}
1314
1315static bool
1316vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
1317{
1318    if (dev->nregions) {
1319        vu_panic(dev, "Regions already registered at postcopy-listen");
1320        vmsg_set_reply_u64(vmsg, -1);
1321        return true;
1322    }
1323    dev->postcopy_listening = true;
1324
1325    vmsg_set_reply_u64(vmsg, 0);
1326    return true;
1327}
1328
1329static bool
1330vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
1331{
1332    DPRINT("%s: Entry\n", __func__);
1333    dev->postcopy_listening = false;
1334    if (dev->postcopy_ufd > 0) {
1335        close(dev->postcopy_ufd);
1336        dev->postcopy_ufd = -1;
1337        DPRINT("%s: Done close\n", __func__);
1338    }
1339
1340    vmsg_set_reply_u64(vmsg, 0);
1341    DPRINT("%s: exit\n", __func__);
1342    return true;
1343}
1344
1345static inline uint64_t
1346vu_inflight_queue_size(uint16_t queue_size)
1347{
1348    return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
1349           sizeof(uint16_t), INFLIGHT_ALIGNMENT);
1350}
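
/* Worked example, assuming the 16-byte VuDescStateSplit layout: a
 * 128-entry queue needs 16 * 128 + 2 = 2050 bytes, which ALIGN_UP()
 * rounds to 2112 so that each queue's slice of the inflight buffer
 * starts on an INFLIGHT_ALIGNMENT (64-byte) boundary. */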
1351
1352static bool
1353vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1354{
1355    int fd;
1356    void *addr;
1357    uint64_t mmap_size;
1358    uint16_t num_queues, queue_size;
1359
1360    if (vmsg->size != sizeof(vmsg->payload.inflight)) {
1361        vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
1362        vmsg->payload.inflight.mmap_size = 0;
1363        return true;
1364    }
1365
1366    num_queues = vmsg->payload.inflight.num_queues;
1367    queue_size = vmsg->payload.inflight.queue_size;
1368
1369    DPRINT("get_inflight_fd num_queues: %"PRId16"\n", num_queues);
1370    DPRINT("get_inflight_fd queue_size: %"PRId16"\n", queue_size);
1371
1372    mmap_size = vu_inflight_queue_size(queue_size) * num_queues;
1373
1374    addr = qemu_memfd_alloc("vhost-inflight", mmap_size,
1375                            F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1376                            &fd, NULL);
1377
1378    if (!addr) {
1379        vu_panic(dev, "Failed to alloc vhost inflight area");
1380        vmsg->payload.inflight.mmap_size = 0;
1381        return true;
1382    }
1383
1384    memset(addr, 0, mmap_size);
1385
1386    dev->inflight_info.addr = addr;
1387    dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
1388    dev->inflight_info.fd = vmsg->fds[0] = fd;
1389    vmsg->fd_num = 1;
1390    vmsg->payload.inflight.mmap_offset = 0;
1391
1392    DPRINT("send inflight mmap_size: %"PRId64"\n",
1393           vmsg->payload.inflight.mmap_size);
1394    DPRINT("send inflight mmap offset: %"PRId64"\n",
1395           vmsg->payload.inflight.mmap_offset);
1396
1397    return true;
1398}
1399
1400static bool
1401vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1402{
1403    int fd, i;
1404    uint64_t mmap_size, mmap_offset;
1405    uint16_t num_queues, queue_size;
1406    void *rc;
1407
1408    if (vmsg->fd_num != 1 ||
1409        vmsg->size != sizeof(vmsg->payload.inflight)) {
1410        vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
1411                 vmsg->size, vmsg->fd_num);
1412        return false;
1413    }
1414
1415    fd = vmsg->fds[0];
1416    mmap_size = vmsg->payload.inflight.mmap_size;
1417    mmap_offset = vmsg->payload.inflight.mmap_offset;
1418    num_queues = vmsg->payload.inflight.num_queues;
1419    queue_size = vmsg->payload.inflight.queue_size;
1420
1421    DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
1422    DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
1423    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
1424    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);
1425
1426    rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1427              fd, mmap_offset);
1428
1429    if (rc == MAP_FAILED) {
1430        vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
1431        return false;
1432    }
1433
1434    if (dev->inflight_info.fd) {
1435        close(dev->inflight_info.fd);
1436    }
1437
1438    if (dev->inflight_info.addr) {
1439        munmap(dev->inflight_info.addr, dev->inflight_info.size);
1440    }
1441
1442    dev->inflight_info.fd = fd;
1443    dev->inflight_info.addr = rc;
1444    dev->inflight_info.size = mmap_size;
1445
1446    for (i = 0; i < num_queues; i++) {
1447        dev->vq[i].inflight = (VuVirtqInflight *)rc;
1448        dev->vq[i].inflight->desc_num = queue_size;
1449        rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
1450    }
1451
1452    return false;
1453}
1454
1455static bool
1456vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
1457{
1458    int do_reply = 0;
1459
1460    /* Print out generic part of the request. */
1461    DPRINT("================ Vhost user message ================\n");
1462    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
1463           vmsg->request);
1464    DPRINT("Flags:   0x%x\n", vmsg->flags);
1465    DPRINT("Size:    %d\n", vmsg->size);
1466
1467    if (vmsg->fd_num) {
1468        int i;
1469        DPRINT("Fds:");
1470        for (i = 0; i < vmsg->fd_num; i++) {
1471            DPRINT(" %d", vmsg->fds[i]);
1472        }
1473        DPRINT("\n");
1474    }
1475
1476    if (dev->iface->process_msg &&
1477        dev->iface->process_msg(dev, vmsg, &do_reply)) {
1478        return do_reply;
1479    }
1480
1481    switch (vmsg->request) {
1482    case VHOST_USER_GET_FEATURES:
1483        return vu_get_features_exec(dev, vmsg);
1484    case VHOST_USER_SET_FEATURES:
1485        return vu_set_features_exec(dev, vmsg);
1486    case VHOST_USER_GET_PROTOCOL_FEATURES:
1487        return vu_get_protocol_features_exec(dev, vmsg);
1488    case VHOST_USER_SET_PROTOCOL_FEATURES:
1489        return vu_set_protocol_features_exec(dev, vmsg);
1490    case VHOST_USER_SET_OWNER:
1491        return vu_set_owner_exec(dev, vmsg);
1492    case VHOST_USER_RESET_OWNER:
1493        return vu_reset_device_exec(dev, vmsg);
1494    case VHOST_USER_SET_MEM_TABLE:
1495        return vu_set_mem_table_exec(dev, vmsg);
1496    case VHOST_USER_SET_LOG_BASE:
1497        return vu_set_log_base_exec(dev, vmsg);
1498    case VHOST_USER_SET_LOG_FD:
1499        return vu_set_log_fd_exec(dev, vmsg);
1500    case VHOST_USER_SET_VRING_NUM:
1501        return vu_set_vring_num_exec(dev, vmsg);
1502    case VHOST_USER_SET_VRING_ADDR:
1503        return vu_set_vring_addr_exec(dev, vmsg);
1504    case VHOST_USER_SET_VRING_BASE:
1505        return vu_set_vring_base_exec(dev, vmsg);
1506    case VHOST_USER_GET_VRING_BASE:
1507        return vu_get_vring_base_exec(dev, vmsg);
1508    case VHOST_USER_SET_VRING_KICK:
1509        return vu_set_vring_kick_exec(dev, vmsg);
1510    case VHOST_USER_SET_VRING_CALL:
1511        return vu_set_vring_call_exec(dev, vmsg);
1512    case VHOST_USER_SET_VRING_ERR:
1513        return vu_set_vring_err_exec(dev, vmsg);
1514    case VHOST_USER_GET_QUEUE_NUM:
1515        return vu_get_queue_num_exec(dev, vmsg);
1516    case VHOST_USER_SET_VRING_ENABLE:
1517        return vu_set_vring_enable_exec(dev, vmsg);
1518    case VHOST_USER_SET_SLAVE_REQ_FD:
1519        return vu_set_slave_req_fd(dev, vmsg);
1520    case VHOST_USER_GET_CONFIG:
1521        return vu_get_config(dev, vmsg);
1522    case VHOST_USER_SET_CONFIG:
1523        return vu_set_config(dev, vmsg);
1524    case VHOST_USER_NONE:
1525        /* if you need processing before exit, override iface->process_msg */
1526        exit(0);
1527    case VHOST_USER_POSTCOPY_ADVISE:
1528        return vu_set_postcopy_advise(dev, vmsg);
1529    case VHOST_USER_POSTCOPY_LISTEN:
1530        return vu_set_postcopy_listen(dev, vmsg);
1531    case VHOST_USER_POSTCOPY_END:
1532        return vu_set_postcopy_end(dev, vmsg);
1533    case VHOST_USER_GET_INFLIGHT_FD:
1534        return vu_get_inflight_fd(dev, vmsg);
1535    case VHOST_USER_SET_INFLIGHT_FD:
1536        return vu_set_inflight_fd(dev, vmsg);
1537    default:
1538        vmsg_close_fds(vmsg);
1539        vu_panic(dev, "Unhandled request: %d", vmsg->request);
1540    }
1541
1542    return false;
1543}
1544
1545bool
1546vu_dispatch(VuDev *dev)
1547{
1548    VhostUserMsg vmsg = { 0, };
1549    int reply_requested;
1550    bool success = false;
1551
1552    if (!vu_message_read(dev, dev->sock, &vmsg)) {
1553        goto end;
1554    }
1555
1556    reply_requested = vu_process_message(dev, &vmsg);
1557    if (!reply_requested) {
1558        success = true;
1559        goto end;
1560    }
1561
1562    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
1563        goto end;
1564    }
1565
1566    success = true;
1567
1568end:
1569    free(vmsg.data);
1570    return success;
1571}
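
/* A minimal sketch of driving the library, assuming a poll(2)-based
 * main loop; it simply calls vu_dispatch() whenever the vhost-user
 * socket becomes readable and stops on disconnect or protocol error:
 *
 *     struct pollfd pfd = { .fd = sock_fd, .events = POLLIN };
 *     while (poll(&pfd, 1, -1) > 0) {
 *         if (!vu_dispatch(&dev)) {
 *             break;
 *         }
 *     }
 */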
1572
1573void
1574vu_deinit(VuDev *dev)
1575{
1576    int i;
1577
1578    for (i = 0; i < dev->nregions; i++) {
1579        VuDevRegion *r = &dev->regions[i];
1580        void *m = (void *) (uintptr_t) r->mmap_addr;
1581        if (m != MAP_FAILED) {
1582            munmap(m, r->size + r->mmap_offset);
1583        }
1584    }
1585    dev->nregions = 0;
1586
1587    for (i = 0; i < dev->max_queues; i++) {
1588        VuVirtq *vq = &dev->vq[i];
1589
1590        if (vq->call_fd != -1) {
1591            close(vq->call_fd);
1592            vq->call_fd = -1;
1593        }
1594
1595        if (vq->kick_fd != -1) {
1596            close(vq->kick_fd);
1597            vq->kick_fd = -1;
1598        }
1599
1600        if (vq->err_fd != -1) {
1601            close(vq->err_fd);
1602            vq->err_fd = -1;
1603        }
1604
1605        if (vq->resubmit_list) {
1606            free(vq->resubmit_list);
1607            vq->resubmit_list = NULL;
1608        }
1609
1610        vq->inflight = NULL;
1611    }
1612
1613    if (dev->inflight_info.addr) {
1614        munmap(dev->inflight_info.addr, dev->inflight_info.size);
1615        dev->inflight_info.addr = NULL;
1616    }
1617
1618    if (dev->inflight_info.fd > 0) {
1619        close(dev->inflight_info.fd);
1620        dev->inflight_info.fd = -1;
1621    }
1622
1623    vu_close_log(dev);
1624    if (dev->slave_fd != -1) {
1625        close(dev->slave_fd);
1626        dev->slave_fd = -1;
1627    }
1628
1629    if (dev->sock != -1) {
1630        close(dev->sock);
1631    }
1632
1633    free(dev->vq);
1634    dev->vq = NULL;
1635}
1636
1637bool
1638vu_init(VuDev *dev,
1639        uint16_t max_queues,
1640        int socket,
1641        vu_panic_cb panic,
1642        vu_set_watch_cb set_watch,
1643        vu_remove_watch_cb remove_watch,
1644        const VuDevIface *iface)
1645{
1646    uint16_t i;
1647
1648    assert(max_queues > 0);
1649    assert(socket >= 0);
1650    assert(set_watch);
1651    assert(remove_watch);
1652    assert(iface);
1653    assert(panic);
1654
1655    memset(dev, 0, sizeof(*dev));
1656
1657    dev->sock = socket;
1658    dev->panic = panic;
1659    dev->set_watch = set_watch;
1660    dev->remove_watch = remove_watch;
1661    dev->iface = iface;
1662    dev->log_call_fd = -1;
1663    dev->slave_fd = -1;
1664    dev->max_queues = max_queues;
1665
1666    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
1667    if (!dev->vq) {
1668        DPRINT("%s: failed to malloc virtqueues\n", __func__);
1669        return false;
1670    }
1671
1672    for (i = 0; i < max_queues; i++) {
1673        dev->vq[i] = (VuVirtq) {
1674            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
1675            .notification = true,
1676        };
1677    }
1678
1679    return true;
1680}
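
/* Typical setup sketch, assuming an already-connected UNIX socket and
 * application-provided callbacks (all names below are illustrative):
 *
 *     VuDev dev;
 *
 *     if (!vu_init(&dev, 1, sock_fd, my_panic,
 *                  my_set_watch, my_remove_watch, &my_iface)) {
 *         ...bail out, virtqueue allocation failed...
 *     }
 */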
1681
1682VuVirtq *
1683vu_get_queue(VuDev *dev, int qidx)
1684{
1685    assert(qidx < dev->max_queues);
1686    return &dev->vq[qidx];
1687}
1688
1689bool
1690vu_queue_enabled(VuDev *dev, VuVirtq *vq)
1691{
1692    return vq->enable;
1693}
1694
1695bool
1696vu_queue_started(const VuDev *dev, const VuVirtq *vq)
1697{
1698    return vq->started;
1699}
1700
1701static inline uint16_t
1702vring_avail_flags(VuVirtq *vq)
1703{
1704    return vq->vring.avail->flags;
1705}
1706
1707static inline uint16_t
1708vring_avail_idx(VuVirtq *vq)
1709{
1710    vq->shadow_avail_idx = vq->vring.avail->idx;
1711
1712    return vq->shadow_avail_idx;
1713}
1714
1715static inline uint16_t
1716vring_avail_ring(VuVirtq *vq, int i)
1717{
1718    return vq->vring.avail->ring[i];
1719}
1720
1721static inline uint16_t
1722vring_get_used_event(VuVirtq *vq)
1723{
1724    return vring_avail_ring(vq, vq->vring.num);
1725}
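
/* With VIRTIO_RING_F_EVENT_IDX negotiated, the used_event field lives
 * in the spare uint16_t slot after the available ring, i.e. at
 * avail->ring[vring.num], which is exactly what this helper reads. */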
1726
1727static int
1728virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
1729{
1730    uint16_t num_heads = vring_avail_idx(vq) - idx;
1731
1732    /* Check it isn't doing very strange things with descriptor numbers. */
1733    if (num_heads > vq->vring.num) {
1734        vu_panic(dev, "Guest moved used index from %u to %u",
1735                 idx, vq->shadow_avail_idx);
1736        return -1;
1737    }
1738    if (num_heads) {
1739        /* On success, callers read a descriptor at vq->last_avail_idx.
1740         * Make sure descriptor read does not bypass avail index read. */
1741        smp_rmb();
1742    }
1743
1744    return num_heads;
1745}
1746
1747static bool
1748virtqueue_get_head(VuDev *dev, VuVirtq *vq,
1749                   unsigned int idx, unsigned int *head)
1750{
1751    /* Grab the next descriptor number they're advertising, and increment
1752     * the index we've seen. */
1753    *head = vring_avail_ring(vq, idx % vq->vring.num);
1754
1755    /* If their number is silly, that's a fatal mistake. */
1756    if (*head >= vq->vring.num) {
1757        vu_panic(dev, "Guest says index %u is available", *head);
1758        return false;
1759    }
1760
1761    return true;
1762}
1763
1764static int
1765virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
1766                             uint64_t addr, size_t len)
1767{
1768    struct vring_desc *ori_desc;
1769    uint64_t read_len;
1770
1771    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
1772        return -1;
1773    }
1774
1775    if (len == 0) {
1776        return -1;
1777    }
1778
1779    while (len) {
1780        read_len = len;
1781        ori_desc = vu_gpa_to_va(dev, &read_len, addr);
1782        if (!ori_desc) {
1783            return -1;
1784        }
1785
1786        memcpy(desc, ori_desc, read_len);
1787        len -= read_len;
1788        addr += read_len;
1789        desc = (struct vring_desc *)((char *)desc + read_len);
1790    }
1791
1792    return 0;
1793}
1794
1795enum {
1796    VIRTQUEUE_READ_DESC_ERROR = -1,
1797    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
1798    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
1799};
1800
1801static int
1802virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
1803                         int i, unsigned int max, unsigned int *next)
1804{
1805    /* If this descriptor says it doesn't chain, we're done. */
1806    if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
1807        return VIRTQUEUE_READ_DESC_DONE;
1808    }
1809
1810    /* Check they're not leading us off end of descriptors. */
1811    *next = desc[i].next;
1812    /* Make sure compiler knows to grab that: we don't want it changing! */
1813    smp_wmb();
1814
1815    if (*next >= max) {
1816        vu_panic(dev, "Desc next is %u", *next);
1817        return VIRTQUEUE_READ_DESC_ERROR;
1818    }
1819
1820    return VIRTQUEUE_READ_DESC_MORE;
1821}
1822
1823void
1824vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
1825                         unsigned int *out_bytes,
1826                         unsigned max_in_bytes, unsigned max_out_bytes)
1827{
1828    unsigned int idx;
1829    unsigned int total_bufs, in_total, out_total;
1830    int rc;
1831
1832    idx = vq->last_avail_idx;
1833
1834    total_bufs = in_total = out_total = 0;
1835    if (unlikely(dev->broken) ||
1836        unlikely(!vq->vring.avail)) {
1837        goto done;
1838    }
1839
1840    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
1841        unsigned int max, desc_len, num_bufs, indirect = 0;
1842        uint64_t desc_addr, read_len;
1843        struct vring_desc *desc;
1844        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
1845        unsigned int i;
1846
1847        max = vq->vring.num;
1848        num_bufs = total_bufs;
1849        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
1850            goto err;
1851        }
1852        desc = vq->vring.desc;
1853
1854        if (desc[i].flags & VRING_DESC_F_INDIRECT) {
1855            if (desc[i].len % sizeof(struct vring_desc)) {
1856                vu_panic(dev, "Invalid size for indirect buffer table");
1857                goto err;
1858            }
1859
1860            /* If we've got too many, that implies a descriptor loop. */
1861            if (num_bufs >= max) {
1862                vu_panic(dev, "Looped descriptor");
1863                goto err;
1864            }
1865
1866            /* loop over the indirect descriptor table */
1867            indirect = 1;
1868            desc_addr = desc[i].addr;
1869            desc_len = desc[i].len;
1870            max = desc_len / sizeof(struct vring_desc);
1871            read_len = desc_len;
1872            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
1873            if (unlikely(desc && read_len != desc_len)) {
1874                /* Failed to use zero copy */
1875                desc = NULL;
1876                if (!virtqueue_read_indirect_desc(dev, desc_buf,
1877                                                  desc_addr,
1878                                                  desc_len)) {
1879                    desc = desc_buf;
1880                }
1881            }
1882            if (!desc) {
1883                vu_panic(dev, "Invalid indirect buffer table");
1884                goto err;
1885            }
1886            num_bufs = i = 0;
1887        }
1888
1889        do {
1890            /* If we've got too many, that implies a descriptor loop. */
1891            if (++num_bufs > max) {
1892                vu_panic(dev, "Looped descriptor");
1893                goto err;
1894            }
1895
1896            if (desc[i].flags & VRING_DESC_F_WRITE) {
1897                in_total += desc[i].len;
1898            } else {
1899                out_total += desc[i].len;
1900            }
1901            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1902                goto done;
1903            }
1904            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
1905        } while (rc == VIRTQUEUE_READ_DESC_MORE);
1906
1907        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1908            goto err;
1909        }
1910
1911        if (!indirect) {
1912            total_bufs = num_bufs;
1913        } else {
1914            total_bufs++;
1915        }
1916    }
1917    if (rc < 0) {
1918        goto err;
1919    }
1920done:
1921    if (in_bytes) {
1922        *in_bytes = in_total;
1923    }
1924    if (out_bytes) {
1925        *out_bytes = out_total;
1926    }
1927    return;
1928
1929err:
1930    in_total = out_total = 0;
1931    goto done;
1932}
1933
1934bool
1935vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
1936                     unsigned int out_bytes)
1937{
1938    unsigned int in_total, out_total;
1939
1940    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
1941                             in_bytes, out_bytes);
1942
1943    return in_bytes <= in_total && out_bytes <= out_total;
1944}
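
    /*
     * Example (sketch): a backend that needs, say, a 16-byte device-writable
     * reply buffer before committing to a request can gate on:
     *
     *     if (!vu_queue_avail_bytes(dev, vq, 16, 0)) {
     *         return; // not enough guest buffer space queued yet
     *     }
     *
     * The third argument is the required device-writable (in) byte count,
     * the fourth the required device-readable (out) byte count.
     */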
1945
1946/* Fetch avail_idx from VQ memory only when we really need to know if
1947 * guest has added some buffers. */
1948bool
1949vu_queue_empty(VuDev *dev, VuVirtq *vq)
1950{
1951    if (unlikely(dev->broken) ||
1952        unlikely(!vq->vring.avail)) {
1953        return true;
1954    }
1955
1956    if (vq->shadow_avail_idx != vq->last_avail_idx) {
1957        return false;
1958    }
1959
1960    return vring_avail_idx(vq) == vq->last_avail_idx;
1961}
1962
1963static bool
1964vring_notify(VuDev *dev, VuVirtq *vq)
1965{
1966    uint16_t old, new;
1967    bool v;
1968
1969    /* We need to expose used array entries before checking used event. */
1970    smp_mb();
1971
1972    /* Always notify when the queue is empty, if VIRTIO_F_NOTIFY_ON_EMPTY was acknowledged */
1973    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1974        !vq->inuse && vu_queue_empty(dev, vq)) {
1975        return true;
1976    }
1977
1978    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1979        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
1980    }
1981
1982    v = vq->signalled_used_valid;
1983    vq->signalled_used_valid = true;
1984    old = vq->signalled_used;
1985    new = vq->signalled_used = vq->used_idx;
1986    return !v || vring_need_event(vring_get_used_event(vq), new, old);
1987}
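
    /*
     * For reference, vring_need_event() (from the standard virtio ring
     * header) evaluates
     *
     *     (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old)
     *
     * i.e. notify iff the guest's used_event index lies in the half-open
     * window [old, new), modulo 2^16.  Worked example: with old = 5 and
     * new = 8, an event index of 6 gives 1 < 3, so we notify; an event
     * index of 9 gives 65534 < 3, which is false, so the notification is
     * suppressed.  The unsigned arithmetic keeps this correct across
     * 16-bit index wraparound.
     */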
1988
1989void
1990vu_queue_notify(VuDev *dev, VuVirtq *vq)
1991{
1992    if (unlikely(dev->broken) ||
1993        unlikely(!vq->vring.avail)) {
1994        return;
1995    }
1996
1997    if (!vring_notify(dev, vq)) {
1998        DPRINT("skipped notify...\n");
1999        return;
2000    }
2001
2002    if (eventfd_write(vq->call_fd, 1) < 0) {
2003        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
2004    }
2005}
2006
2007static inline void
2008vring_used_flags_set_bit(VuVirtq *vq, int mask)
2009{
2010    uint16_t *flags;
2011
2012    flags = (uint16_t *)((char *)vq->vring.used +
2013                         offsetof(struct vring_used, flags));
2014    *flags |= mask;
2015}
2016
2017static inline void
2018vring_used_flags_unset_bit(VuVirtq *vq, int mask)
2019{
2020    uint16_t *flags;
2021
2022    flags = (uint16_t *)((char *)vq->vring.used +
2023                         offsetof(struct vring_used, flags));
2024    *flags &= ~mask;
2025}
2026
2027static inline void
2028vring_set_avail_event(VuVirtq *vq, uint16_t val)
2029{
2030    if (!vq->notification) {
2031        return;
2032    }
2033
2034    *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val;
2035}
2036
2037void
2038vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
2039{
2040    vq->notification = enable;
2041    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2042        vring_set_avail_event(vq, vring_avail_idx(vq));
2043    } else if (enable) {
2044        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
2045    } else {
2046        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
2047    }
2048    if (enable) {
2049        /* Expose avail event/used flags before caller checks the avail idx. */
2050        smp_mb();
2051    }
2052}
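
    /*
     * Sketch of the usual calling pattern: a backend disables guest kicks
     * while draining the ring, then re-enables them and re-checks, closing
     * the race where the guest adds a buffer just before notifications are
     * switched back on (process() is a hypothetical helper that pushes the
     * element back as used and frees it):
     *
     *     do {
     *         vu_queue_set_notification(dev, vq, 0);
     *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
     *             process(dev, vq, elem);
     *         }
     *         vu_queue_set_notification(dev, vq, 1);
     *     } while (!vu_queue_empty(dev, vq));
     */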
2053
2054static void
2055virtqueue_map_desc(VuDev *dev,
2056                   unsigned int *p_num_sg, struct iovec *iov,
2057                   unsigned int max_num_sg, bool is_write,
2058                   uint64_t pa, size_t sz)
2059{
2060    unsigned num_sg = *p_num_sg;
2061
2062    assert(num_sg <= max_num_sg);
2063
2064    if (!sz) {
2065        vu_panic(dev, "virtio: zero sized buffers are not allowed");
2066        return;
2067    }
2068
2069    while (sz) {
2070        uint64_t len = sz;
2071
2072        if (num_sg == max_num_sg) {
2073            vu_panic(dev, "virtio: too many descriptors in indirect table");
2074            return;
2075        }
2076
2077        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
2078        if (iov[num_sg].iov_base == NULL) {
2079            vu_panic(dev, "virtio: invalid address for buffers");
2080            return;
2081        }
2082        iov[num_sg].iov_len = len;
2083        num_sg++;
2084        sz -= len;
2085        pa += len;
2086    }
2087
2088    *p_num_sg = num_sg;
2089}
2090
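    /*
     * Layout note: virtqueue_alloc_element() packs the element header and
     * both scatter-gather arrays into a single allocation.  Assuming the
     * caller passes sz == sizeof(VuVirtqElement), the result is
     *
     *     [ VuVirtqElement | in_sg[in_num] | out_sg[out_num] ]
     *                        ^ aligned up to __alignof__(struct iovec)
     *
     * so one free() releases everything vu_queue_pop() hands out.
     */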
2091static void *
2092virtqueue_alloc_element(size_t sz,
2093                        unsigned out_num, unsigned in_num)
2094{
2095    VuVirtqElement *elem;
2096    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
2097    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
2098    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
2099
2100    assert(sz >= sizeof(VuVirtqElement));
2101    elem = malloc(out_sg_end);
        if (!elem) {
            DPRINT("%s: failed to malloc virtqueue element\n", __func__);
            return NULL;
        }
2102    elem->out_num = out_num;
2103    elem->in_num = in_num;
2104    elem->in_sg = (void *)elem + in_sg_ofs;
2105    elem->out_sg = (void *)elem + out_sg_ofs;
2106    return elem;
2107}
2108
2109static void *
2110vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
2111{
2112    struct vring_desc *desc = vq->vring.desc;
2113    uint64_t desc_addr, read_len;
2114    unsigned int desc_len;
2115    unsigned int max = vq->vring.num;
2116    unsigned int i = idx;
2117    VuVirtqElement *elem;
2118    unsigned int out_num = 0, in_num = 0;
2119    struct iovec iov[VIRTQUEUE_MAX_SIZE];
2120    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2121    int rc;
2122
2123    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
2124        if (desc[i].len % sizeof(struct vring_desc)) {
2125            vu_panic(dev, "Invalid size for indirect buffer table");
                return NULL;
2126        }
2127
2128        /* loop over the indirect descriptor table */
2129        desc_addr = desc[i].addr;
2130        desc_len = desc[i].len;
2131        max = desc_len / sizeof(struct vring_desc);
2132        read_len = desc_len;
2133        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2134        if (unlikely(desc && read_len != desc_len)) {
2135            /* Failed to use zero copy */
2136            desc = NULL;
2137            if (!virtqueue_read_indirect_desc(dev, desc_buf,
2138                                              desc_addr,
2139                                              desc_len)) {
2140                desc = desc_buf;
2141            }
2142        }
2143        if (!desc) {
2144            vu_panic(dev, "Invalid indirect buffer table");
2145            return NULL;
2146        }
2147        i = 0;
2148    }
2149
2150    /* Collect all the descriptors */
2151    do {
2152        if (desc[i].flags & VRING_DESC_F_WRITE) {
2153            virtqueue_map_desc(dev, &in_num, iov + out_num,
2154                               VIRTQUEUE_MAX_SIZE - out_num, true,
2155                               desc[i].addr, desc[i].len);
2156        } else {
2157            if (in_num) {
2158                vu_panic(dev, "Incorrect order for descriptors");
2159                return NULL;
2160            }
2161            virtqueue_map_desc(dev, &out_num, iov,
2162                               VIRTQUEUE_MAX_SIZE, false,
2163                               desc[i].addr, desc[i].len);
2164        }
2165
2166        /* If we've got too many, that implies a descriptor loop. */
2167        if ((in_num + out_num) > max) {
2168            vu_panic(dev, "Looped descriptor");
                return NULL;
2169        }
2170        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
2171    } while (rc == VIRTQUEUE_READ_DESC_MORE);
2172
2173    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
2174        vu_panic(dev, "read descriptor error");
2175        return NULL;
2176    }
2177
2178    /* Now copy what we have collected and mapped */
2179    elem = virtqueue_alloc_element(sz, out_num, in_num);
        if (!elem) {
            return NULL;
        }
2180    elem->index = idx;
2181    for (i = 0; i < out_num; i++) {
2182        elem->out_sg[i] = iov[i];
2183    }
2184    for (i = 0; i < in_num; i++) {
2185        elem->in_sg[i] = iov[out_num + i];
2186    }
2187
2188    return elem;
2189}
2190
2191static int
2192vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
2193{
2194    if (!has_feature(dev->protocol_features,
2195        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2196        return 0;
2197    }
2198
2199    if (unlikely(!vq->inflight)) {
2200        return -1;
2201    }
2202
2203    vq->inflight->desc[desc_idx].counter = vq->counter++;
2204    vq->inflight->desc[desc_idx].inflight = 1;
2205
2206    return 0;
2207}
2208
2209static int
2210vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2211{
2212    if (!has_feature(dev->protocol_features,
2213        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2214        return 0;
2215    }
2216
2217    if (unlikely(!vq->inflight)) {
2218        return -1;
2219    }
2220
2221    vq->inflight->last_batch_head = desc_idx;
2222
2223    return 0;
2224}
2225
2226static int
2227vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2228{
2229    if (!has_feature(dev->protocol_features,
2230        VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2231        return 0;
2232    }
2233
2234    if (unlikely(!vq->inflight)) {
2235        return -1;
2236    }
2237
2238    barrier();
2239
2240    vq->inflight->desc[desc_idx].inflight = 0;
2241
2242    barrier();
2243
2244    vq->inflight->used_idx = vq->used_idx;
2245
2246    return 0;
2247}
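
    /*
     * Taken together, these helpers implement the inflight-tracking
     * contract of VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD: vu_queue_pop()
     * flags a descriptor as inflight when handing it out
     * (vu_queue_inflight_get()), and vu_queue_push() records the batch
     * head just before publishing the used index
     * (vu_queue_inflight_pre_put()) and clears the flag just after
     * (vu_queue_inflight_post_put()).  Because the tracking lives in
     * shared memory that survives a backend crash, a restarted backend
     * can replay any descriptor still flagged inflight -- see the
     * resubmit_list handling in vu_queue_pop() below.
     */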
2248
2249void *
2250vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
2251{
2252    int i;
2253    unsigned int head;
2254    VuVirtqElement *elem;
2255
2256    if (unlikely(dev->broken) ||
2257        unlikely(!vq->vring.avail)) {
2258        return NULL;
2259    }
2260
2261    if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
2262        i = (--vq->resubmit_num);
2263        elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);
2264
2265        if (!vq->resubmit_num) {
2266            free(vq->resubmit_list);
2267            vq->resubmit_list = NULL;
2268        }
2269
2270        return elem;
2271    }
2272
2273    if (vu_queue_empty(dev, vq)) {
2274        return NULL;
2275    }
2276    /*
2277     * Needed after vu_queue_empty(), see comment in
2278     * virtqueue_num_heads().
2279     */
2280    smp_rmb();
2281
2282    if (vq->inuse >= vq->vring.num) {
2283        vu_panic(dev, "Virtqueue size exceeded");
2284        return NULL;
2285    }
2286
2287    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
2288        return NULL;
2289    }
2290
2291    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2292        vring_set_avail_event(vq, vq->last_avail_idx);
2293    }
2294
2295    elem = vu_queue_map_desc(dev, vq, head, sz);
2296
2297    if (!elem) {
2298        return NULL;
2299    }
2300
2301    vq->inuse++;
2302
2303    vu_queue_inflight_get(dev, vq, head);
2304
2305    return elem;
2306}
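
    /*
     * Example (sketch, not part of the library): a minimal request loop as
     * a backend might run it from its queue kick handler, assuming a
     * hypothetical handle_request() that consumes elem->out_sg and fills
     * elem->in_sg, returning the number of bytes written:
     *
     *     VuVirtqElement *elem;
     *
     *     while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
     *         size_t written = handle_request(elem->out_sg, elem->out_num,
     *                                         elem->in_sg, elem->in_num);
     *         vu_queue_push(dev, vq, elem, written);
     *         free(elem);
     *     }
     *     vu_queue_notify(dev, vq);
     *
     * The caller free()s the element: vu_queue_pop() returns a single
     * malloc() block laid out by virtqueue_alloc_element().
     */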
2307
2308static void
2309vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2310                        size_t len)
2311{
2312    vq->inuse--;
2313    /* unmap, when DMA support is added */
2314}
2315
2316void
2317vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2318               size_t len)
2319{
2320    vq->last_avail_idx--;
2321    vu_queue_detach_element(dev, vq, elem, len);
2322}
2323
2324bool
2325vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
2326{
2327    if (num > vq->inuse) {
2328        return false;
2329    }
2330    vq->last_avail_idx -= num;
2331    vq->inuse -= num;
2332    return true;
2333}
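
    /*
     * Sketch: unpop/rewind let a backend back out of elements it popped
     * but cannot service yet, e.g. on temporary resource exhaustion:
     *
     *     elem = vu_queue_pop(dev, vq, sizeof(*elem));
     *     if (elem && !try_process(elem)) {     // try_process() hypothetical
     *         vu_queue_unpop(dev, vq, elem, 0); // retry on a later kick
     *         free(elem);
     *     }
     *
     * vu_queue_rewind() does the same for the last num popped elements.
     */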
2334
2335static inline
2336void vring_used_write(VuDev *dev, VuVirtq *vq,
2337                      struct vring_used_elem *uelem, int i)
2338{
2339    struct vring_used *used = vq->vring.used;
2340
2341    used->ring[i] = *uelem;
2342    vu_log_write(dev, vq->vring.log_guest_addr +
2343                 offsetof(struct vring_used, ring[i]),
2344                 sizeof(used->ring[i]));
2345}
2346
2348static void
2349vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
2350                  const VuVirtqElement *elem,
2351                  unsigned int len)
2352{
2353    struct vring_desc *desc = vq->vring.desc;
2354    unsigned int i, max, min, desc_len;
2355    uint64_t desc_addr, read_len;
2356    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2357    unsigned num_bufs = 0;
2358
2359    max = vq->vring.num;
2360    i = elem->index;
2361
2362    if (desc[i].flags & VRING_DESC_F_INDIRECT) {
2363        if (desc[i].len % sizeof(struct vring_desc)) {
2364            vu_panic(dev, "Invalid size for indirect buffer table");
                return;
2365        }
2366
2367        /* loop over the indirect descriptor table */
2368        desc_addr = desc[i].addr;
2369        desc_len = desc[i].len;
2370        max = desc_len / sizeof(struct vring_desc);
2371        read_len = desc_len;
2372        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2373        if (unlikely(desc && read_len != desc_len)) {
2374            /* Failed to use zero copy */
2375            desc = NULL;
2376            if (!virtqueue_read_indirect_desc(dev, desc_buf,
2377                                              desc_addr,
2378                                              desc_len)) {
2379                desc = desc_buf;
2380            }
2381        }
2382        if (!desc) {
2383            vu_panic(dev, "Invalid indirect buffer table");
2384            return;
2385        }
2386        i = 0;
2387    }
2388
2389    do {
2390        if (++num_bufs > max) {
2391            vu_panic(dev, "Looped descriptor");
2392            return;
2393        }
2394
2395        if (desc[i].flags & VRING_DESC_F_WRITE) {
2396            min = MIN(desc[i].len, len);
2397            vu_log_write(dev, desc[i].addr, min);
2398            len -= min;
2399        }
2400
2401    } while (len > 0 &&
2402             (virtqueue_read_next_desc(dev, desc, i, max, &i)
2403              == VIRTQUEUE_READ_DESC_MORE));
2404}
2405
2406void
2407vu_queue_fill(VuDev *dev, VuVirtq *vq,
2408              const VuVirtqElement *elem,
2409              unsigned int len, unsigned int idx)
2410{
2411    struct vring_used_elem uelem;
2412
2413    if (unlikely(dev->broken) ||
2414        unlikely(!vq->vring.avail)) {
2415        return;
2416    }
2417
2418    vu_log_queue_fill(dev, vq, elem, len);
2419
2420    idx = (idx + vq->used_idx) % vq->vring.num;
2421
2422    uelem.id = elem->index;
2423    uelem.len = len;
2424    vring_used_write(dev, vq, &uelem, idx);
2425}
2426
2427static inline
2428void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
2429{
2430    vq->vring.used->idx = val;
2431    vu_log_write(dev,
2432                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
2433                 sizeof(vq->vring.used->idx));
2434
2435    vq->used_idx = val;
2436}
2437
2438void
2439vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
2440{
2441    uint16_t old, new;
2442
2443    if (unlikely(dev->broken) ||
2444        unlikely(!vq->vring.avail)) {
2445        return;
2446    }
2447
2448    /* Make sure buffer is written before we update index. */
2449    smp_wmb();
2450
2451    old = vq->used_idx;
2452    new = old + count;
2453    vring_used_idx_set(dev, vq, new);
2454    vq->inuse -= count;
2455    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
2456        vq->signalled_used_valid = false;
2457    }
2458}
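
    /*
     * vu_queue_fill() and vu_queue_flush() can also be used directly to
     * batch completions: stage several used entries at increasing offsets,
     * then publish them with a single index update and one notification.
     * Sketch, assuming n completed elements and their written lengths:
     *
     *     for (i = 0; i < n; i++) {
     *         vu_queue_fill(dev, vq, elem[i], len[i], i);
     *     }
     *     vu_queue_flush(dev, vq, n);
     *     vu_queue_notify(dev, vq);
     *
     * (A backend using the inflight feature would additionally wrap this
     * with the inflight helpers, as vu_queue_push() below does for the
     * single-element case.)
     */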
2459
2460void
2461vu_queue_push(VuDev *dev, VuVirtq *vq,
2462              const VuVirtqElement *elem, unsigned int len)
2463{
2464    vu_queue_fill(dev, vq, elem, len, 0);
2465    vu_queue_inflight_pre_put(dev, vq, elem->index);
2466    vu_queue_flush(dev, vq, 1);
2467    vu_queue_inflight_post_put(dev, vq, elem->index);
2468}
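
    /*
     * The ordering in vu_queue_push() is deliberate: the batch head is
     * recorded before the used index is published, and the inflight flag
     * is cleared only afterwards, so a backend that crashes between the
     * two steps leaves enough state behind (last_batch_head, plus the gap
     * between inflight->used_idx and the vring's used index) for a
     * restarted instance to work out whether this entry was completed.
     */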
2469