qemu/hw/virtio/vhost.c
/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

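/* Check whether the backends of all registered vhost devices can still
 * accommodate one more memory slot beyond the number currently in use. */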
bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

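/* Scan the dirty-log chunks covering the intersection of the memory range
 * [mfirst, mlast] and the region range [rfirst, rlast]; for every dirty bit
 * found, mark the corresponding page dirty in QEMU's bitmap for this
 * section, clearing the bits as they are consumed. */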
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t chunk;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        chunk = atomic_xchg(from, 0);
        while (chunk) {
            int bit = ctzl(chunk);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            chunk &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

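/* Flush the vhost dirty log into QEMU's dirty bitmap for one section,
 * clipped to [first, last]. Both guest memory regions and the used rings
 * are scanned, since the backend also logs used-ring updates. */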
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* A split only happens if the supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if the supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

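/* Compute the log size (in chunks) needed to cover the highest guest
 * physical address among all memory regions and used rings. */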
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

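/* Return a referenced log of the requested size, reusing the cached global
 * log when it matches and allocating (and caching) a new one otherwise.
 * The shared variant lives in a sealed memfd so that it can be handed to a
 * backend in another process. */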
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

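/* Drop the device's reference on its log. When the last reference goes
 * away, the log memory is freed; if sync is set, remaining dirty bits are
 * flushed to QEMU's bitmap first. */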
static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    /* Only clear these after the final sync above, which still needs the
     * old log size to know how much to flush. */
    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* Inform the backend about the log switch before releasing the
     * current log, to ensure no logging is lost in between. */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

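/* After a memory layout change, verify that every ring overlapping the
 * changed range is still mapped at the host address recorded at start
 * time: vhost holds raw pointers into those rings. */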
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    int r = 0;

    for (i = 0; !r && i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            error_report("Unable to map ring buffer for ring %d", i);
            r = -ENOMEM;
        }
        if (p != vq->ring) {
            error_report("Ring buffer relocated for ring %d", i);
            r = -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return r;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

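/* Return true if the memory table must change to describe the given
 * guest-physical range mapped at userspace address uaddr, i.e. if no
 * existing region already covers it with a consistent mapping. */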
static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

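/* Add or remove a memory section in the device's region table. Sections
 * with dirty logging beyond plain migration tracking (e.g. VGA framebuffer
 * tracking) are treated as removals, presumably because vhost could not
 * report those writes back to QEMU. The table is uploaded to the backend
 * later, in vhost_commit(). */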
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
        ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot
     * at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr,
                                    start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

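/* Push the accumulated memory-table changes to the backend: verify the
 * ring mappings over the changed range, grow the dirty log before the
 * table update if needed, upload the table, then shrink the log if it is
 * now oversized. */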
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr;
    ram_addr_t size;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    start_addr = dev->mem_changed_start_addr;
    size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

    r = vhost_verify_ring_mappings(dev, start_addr, size);
    assert(r >= 0);

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes of log to reduce
     * the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

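/* Push the acked feature set to the backend, optionally adding
 * VHOST_F_LOG_ALL to request that all guest writes be logged. */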
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

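/* Switch dirty logging on or off for the whole device: update the feature
 * bits, then reprogram every vring's addresses with the matching log flag,
 * rolling both back if any step fails. */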
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

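/* React to migration starting or stopping. If the device is not running,
 * only the flag is recorded; otherwise the log is sized and installed
 * before logging is enabled, and released after it is disabled. */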
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

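/* Start one virtqueue in the backend: program the ring size and base
 * index, fix up endianness for cross-endian legacy guests, map the
 * descriptor/avail/used rings, hand their addresses to the backend, and
 * wire up the kick eventfd. */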
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

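/* Stop one virtqueue: read back the last avail index so virtio can resume
 * where vhost left off, restore native endianness if it was overridden,
 * and unmap the rings, marking the used ring dirty. */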
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness
     * back to native, which is what legacy devices expect by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

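/* One-time virtqueue setup: create the masked notifier and point the
 * backend's call eventfd at it, so call events land there while the queue
 * is masked. */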
static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

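/* Initialize a vhost device: bind the backend, negotiate features, set up
 * the virtqueues and busyloop timeouts, register the memory listener, and
 * install a migration blocker if the backend cannot log writes or shared
 * log memory is unavailable. */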
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;

    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                     " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (!qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r, e;

    if (!k->ioeventfd_started) {
        error_report("binding does not support host notifiers");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
    }
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

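/* Mask out of the proposed feature set any bit listed in feature_bits
 * that the backend did not offer; the result is what may be exposed to
 * the guest. */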
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

/* Record in acked_features every bit from feature_bits that the guest
 * accepted. */
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}