qemu/hw/virtio/vhost.c
/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/migration.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

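/* Propagate pages the backend marked dirty into QEMU's dirty bitmap.
 * Scans the log chunks covering the intersection of the section range
 * [mfirst, mlast] and the region range [rfirst, rlast]. Chunks are
 * fetched and cleared atomically, since the backend may still be
 * logging writes concurrently. */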
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

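/* Sync the dirty log for one memory section, clipped to [first, last].
 * Both guest memory regions and used rings are covered: vhost logs
 * used-ring writes by guest physical address as well. */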
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it can not
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}

/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (dev->vhost_ops->vhost_backend_can_merge &&
            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
                                                     reg->userspace_addr,
                                                     reg->memory_size)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

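/* Compute the log size needed to cover all regions and used rings,
 * in chunks: the log must extend to the highest guest physical
 * address the backend can mark dirty. */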
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

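/* Allocate a dirty log. A backend that runs in a separate process
 * (e.g. vhost-user) needs the log in shared memory, so it is backed
 * by a sealed memfd whose fd can be passed over the control channel;
 * otherwise plain heap memory is enough. */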
static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd);
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    /* Reset only after the final sync above: clearing log_size first
     * would make that sync a no-op and lose the last dirty pages. */
    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

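/* Switch to a new log of the given size. The backend is pointed at
 * the new log before the old one is released, so no writes are lost
 * in between; the final sync of the old log happens in
 * vhost_log_put(). */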
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

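/* Verify that a single ring part (descriptor table, avail or used
 * ring) overlapping the changed range is still mapped at the host
 * address the backend was given; rings must not move while the
 * backend is using them. */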
static int vhost_verify_ring_part_mapping(void *part,
                                          uint64_t part_addr,
                                          uint64_t part_size,
                                          uint64_t start_addr,
                                          uint64_t size)
{
    hwaddr l;
    void *p;
    int r = 0;

    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
        return 0;
    }
    l = part_size;
    p = cpu_physical_memory_map(part_addr, &l, 1);
    if (!p || l != part_size) {
        r = -ENOMEM;
    }
    if (p != part) {
        r = -EBUSY;
    }
    cpu_physical_memory_unmap(p, l, 0, 0);
    return r;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        /* Stop at the first part that fails to verify. */
        j = 0;
        r = vhost_verify_ring_part_mapping(vq->desc, vq->desc_phys,
                                           vq->desc_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(vq->avail, vq->avail_phys,
                                           vq->avail_size, start_addr, size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(vq->used, vq->used_phys,
                                           vq->used_size, start_addr, size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}


static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}

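/* Add or remove a memory section in the table passed to the backend.
 * Sections that use dirty logging other than for migration (e.g. VGA
 * framebuffers) are never added, because backend writes would bypass
 * that logging. */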
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
    dev->memory_changed = true;
    used_memslots = dev->mem->nregions;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}

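/* Flush accumulated memory-table changes to the backend. When dirty
 * logging is enabled, the log is grown before the table update and
 * only shrunk afterwards, so it always covers every address the
 * backend may log. */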
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    memory_region_ref(section->mr);
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    memory_region_unref(section->mr);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

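/* Enable or disable dirty logging on all of the device's virtqueues,
 * rolling the already-updated rings and the feature bits back to the
 * previous logging state on failure. */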
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

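/* Toggle dirty logging in response to migration starting or stopping.
 * If the device has not been started yet, just record the desired
 * state; vhost_dev_start() will set up the log. */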
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

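/* Hand a virtqueue over to the backend: program the ring size, base
 * index and host ring addresses, wire up the kick eventfd, and
 * disable call notifications when guest notifiers are in use but the
 * queue has no interrupt vector. */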
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

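/* Take a virtqueue back from the backend: read back the last avail
 * index so virtio can resume where vhost left off, restore legacy
 * vring endianness if it was overridden, and unmap the rings. */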
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native as legacy devices expect so by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

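/* Initialize a vhost device: connect to the backend, check its
 * memslot limit, negotiate features, set up per-virtqueue call
 * notifiers and register the memory listener. On error, only the
 * virtqueues initialized so far are torn down. */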
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;

    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                " than current number of present memory slots");
        r = -1;
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        migrate_add_blocker(hdev->migration_blocker);
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert(e >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert(r >= 0);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

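/* Intersect the features the backend supports with the requested
 * feature bits; bits outside feature_bits pass through unchanged.
 * vhost_ack_features() below is the inverse direction: it records
 * guest-acked bits so they can be pushed to the backend at start. */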
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    vhost_log_put(hdev, true);
    hdev->started = false;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}