qemu/hw/virtio/vhost.c
   1/*
   2 * vhost support
   3 *
   4 * Copyright Red Hat, Inc. 2010
   5 *
   6 * Authors:
   7 *  Michael S. Tsirkin <mst@redhat.com>
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.  See
  10 * the COPYING file in the top-level directory.
  11 *
  12 * Contributions after 2012-01-13 are licensed under the terms of the
  13 * GNU GPL, version 2 or (at your option) any later version.
  14 */
  15
  16#include "qemu/osdep.h"
  17#include "qapi/error.h"
  18#include "hw/virtio/vhost.h"
  19#include "hw/hw.h"
  20#include "qemu/atomic.h"
  21#include "qemu/range.h"
  22#include "qemu/error-report.h"
  23#include "qemu/memfd.h"
  24#include "standard-headers/linux/vhost_types.h"
  25#include "exec/address-spaces.h"
  26#include "hw/virtio/virtio-bus.h"
  27#include "hw/virtio/virtio-access.h"
  28#include "migration/blocker.h"
  29#include "sysemu/dma.h"
  30#include "trace.h"
  31
  32/* enabled until disconnected backend stabilizes */
  33#define _VHOST_DEBUG 1
  34
  35#ifdef _VHOST_DEBUG
  36#define VHOST_OPS_DEBUG(fmt, ...) \
  37    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
  38                      strerror(errno), errno); } while (0)
  39#else
  40#define VHOST_OPS_DEBUG(fmt, ...) \
  41    do { } while (0)
  42#endif
  43
  44static struct vhost_log *vhost_log;
  45static struct vhost_log *vhost_log_shm;
  46
  47static unsigned int used_memslots;
  48static QLIST_HEAD(, vhost_dev) vhost_devices =
  49    QLIST_HEAD_INITIALIZER(vhost_devices);
  50
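/*
 * Returns true if every registered vhost backend can still accept one
 * more memory slot, i.e. used_memslots is below the smallest
 * per-backend limit.
 */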
  51bool vhost_has_free_slot(void)
  52{
  53    unsigned int slots_limit = ~0U;
  54    struct vhost_dev *hdev;
  55
  56    QLIST_FOREACH(hdev, &vhost_devices, entry) {
  57        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
  58        slots_limit = MIN(slots_limit, r);
  59    }
  60    return slots_limit > used_memslots;
  61}
  62
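/*
 * Walk the dirty log chunks covering the intersection of the section
 * range [mfirst, mlast] and the region range [rfirst, rlast]:
 * atomically fetch-and-clear each chunk and mark the corresponding
 * guest pages dirty in QEMU's bitmap.
 */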
  63static void vhost_dev_sync_region(struct vhost_dev *dev,
  64                                  MemoryRegionSection *section,
  65                                  uint64_t mfirst, uint64_t mlast,
  66                                  uint64_t rfirst, uint64_t rlast)
  67{
  68    vhost_log_chunk_t *log = dev->log->log;
  69
  70    uint64_t start = MAX(mfirst, rfirst);
  71    uint64_t end = MIN(mlast, rlast);
  72    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
  73    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
  74    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);
  75
  76    if (end < start) {
  77        return;
  78    }
  79    assert(end / VHOST_LOG_CHUNK < dev->log_size);
  80    assert(start / VHOST_LOG_CHUNK < dev->log_size);
  81
   82    for (; from < to; ++from) {
  83        vhost_log_chunk_t log;
  84        /* We first check with non-atomic: much cheaper,
  85         * and we expect non-dirty to be the common case. */
  86        if (!*from) {
  87            addr += VHOST_LOG_CHUNK;
  88            continue;
  89        }
  90        /* Data must be read atomically. We don't really need barrier semantics
  91         * but it's easier to use atomic_* than roll our own. */
  92        log = atomic_xchg(from, 0);
  93        while (log) {
  94            int bit = ctzl(log);
  95            hwaddr page_addr;
  96            hwaddr section_offset;
  97            hwaddr mr_offset;
  98            page_addr = addr + bit * VHOST_LOG_PAGE;
  99            section_offset = page_addr - section->offset_within_address_space;
 100            mr_offset = section_offset + section->offset_within_region;
 101            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
 102            log &= ~(0x1ull << bit);
 103        }
 104        addr += VHOST_LOG_CHUNK;
 105    }
 106}
 107
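/*
 * Sync dirty pages reported by the vhost log for every memory region
 * and used ring that overlaps both the section and [first, last].
 * A no-op while logging is disabled or the device is not started.
 */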
 108static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
 109                                   MemoryRegionSection *section,
 110                                   hwaddr first,
 111                                   hwaddr last)
 112{
 113    int i;
 114    hwaddr start_addr;
 115    hwaddr end_addr;
 116
 117    if (!dev->log_enabled || !dev->started) {
 118        return 0;
 119    }
 120    start_addr = section->offset_within_address_space;
 121    end_addr = range_get_last(start_addr, int128_get64(section->size));
 122    start_addr = MAX(first, start_addr);
 123    end_addr = MIN(last, end_addr);
 124
 125    for (i = 0; i < dev->mem->nregions; ++i) {
 126        struct vhost_memory_region *reg = dev->mem->regions + i;
 127        vhost_dev_sync_region(dev, section, start_addr, end_addr,
 128                              reg->guest_phys_addr,
 129                              range_get_last(reg->guest_phys_addr,
 130                                             reg->memory_size));
 131    }
 132    for (i = 0; i < dev->nvqs; ++i) {
 133        struct vhost_virtqueue *vq = dev->vqs + i;
 134
 135        if (!vq->used_phys && !vq->used_size) {
 136            continue;
 137        }
 138
 139        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
 140                              range_get_last(vq->used_phys, vq->used_size));
 141    }
 142    return 0;
 143}
 144
 145static void vhost_log_sync(MemoryListener *listener,
 146                          MemoryRegionSection *section)
 147{
 148    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
 149                                         memory_listener);
 150    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
 151}
 152
 153static void vhost_log_sync_range(struct vhost_dev *dev,
 154                                 hwaddr first, hwaddr last)
 155{
 156    int i;
 157    /* FIXME: this is N^2 in number of sections */
 158    for (i = 0; i < dev->n_mem_sections; ++i) {
 159        MemoryRegionSection *section = &dev->mem_sections[i];
 160        vhost_sync_dirty_bitmap(dev, section, first, last);
 161    }
 162}
 163
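/*
 * Compute the log size, in chunks, needed to cover the highest guest
 * physical address used by any memory region or used ring.
 */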
 164static uint64_t vhost_get_log_size(struct vhost_dev *dev)
 165{
 166    uint64_t log_size = 0;
 167    int i;
 168    for (i = 0; i < dev->mem->nregions; ++i) {
 169        struct vhost_memory_region *reg = dev->mem->regions + i;
 170        uint64_t last = range_get_last(reg->guest_phys_addr,
 171                                       reg->memory_size);
 172        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
 173    }
 174    for (i = 0; i < dev->nvqs; ++i) {
 175        struct vhost_virtqueue *vq = dev->vqs + i;
 176
 177        if (!vq->used_phys && !vq->used_size) {
 178            continue;
 179        }
 180
 181        uint64_t last = vq->used_phys + vq->used_size - 1;
 182        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
 183    }
 184    return log_size;
 185}
 186
 187static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
 188{
 189    Error *err = NULL;
 190    struct vhost_log *log;
 191    uint64_t logsize = size * sizeof(*(log->log));
 192    int fd = -1;
 193
 194    log = g_new0(struct vhost_log, 1);
 195    if (share) {
 196        log->log = qemu_memfd_alloc("vhost-log", logsize,
 197                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
 198                                    &fd, &err);
 199        if (err) {
 200            error_report_err(err);
 201            g_free(log);
 202            return NULL;
 203        }
 204        memset(log->log, 0, logsize);
 205    } else {
 206        log->log = g_malloc0(logsize);
 207    }
 208
 209    log->size = size;
 210    log->refcnt = 1;
 211    log->fd = fd;
 212
 213    return log;
 214}
 215
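/*
 * Return the global (optionally memfd-shared) log, reference counted.
 * The current log is reused only if its size matches; otherwise a new
 * one is allocated and becomes the global log.
 */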
 216static struct vhost_log *vhost_log_get(uint64_t size, bool share)
 217{
 218    struct vhost_log *log = share ? vhost_log_shm : vhost_log;
 219
 220    if (!log || log->size != size) {
 221        log = vhost_log_alloc(size, share);
 222        if (share) {
 223            vhost_log_shm = log;
 224        } else {
 225            vhost_log = log;
 226        }
 227    } else {
 228        ++log->refcnt;
 229    }
 230
 231    return log;
 232}
 233
 234static void vhost_log_put(struct vhost_dev *dev, bool sync)
 235{
 236    struct vhost_log *log = dev->log;
 237
 238    if (!log) {
 239        return;
 240    }
 241
 242    --log->refcnt;
 243    if (log->refcnt == 0) {
 244        /* Sync only the range covered by the old log */
 245        if (dev->log_size && sync) {
 246            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
 247        }
 248
 249        if (vhost_log == log) {
 250            g_free(log->log);
 251            vhost_log = NULL;
 252        } else if (vhost_log_shm == log) {
 253            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
 254                            log->fd);
 255            vhost_log_shm = NULL;
 256        }
 257
 258        g_free(log);
 259    }
 260
 261    dev->log = NULL;
 262    dev->log_size = 0;
 263}
 264
 265static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
 266{
 267    return dev->vhost_ops->vhost_requires_shm_log &&
 268           dev->vhost_ops->vhost_requires_shm_log(dev);
 269}
 270
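/*
 * Switch the device to a log of the given size: tell the backend about
 * the new log base first, then drop the reference to the old log
 * (syncing it) so that no dirty information is lost.
 */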
 271static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
 272{
 273    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
 274    uint64_t log_base = (uintptr_t)log->log;
 275    int r;
 276
  277    /* inform backend of log switching; this must be done before
  278       releasing the current log, to ensure no logging is lost */
 279    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
 280    if (r < 0) {
 281        VHOST_OPS_DEBUG("vhost_set_log_base failed");
 282    }
 283
 284    vhost_log_put(dev, true);
 285    dev->log = log;
 286    dev->log_size = size;
 287}
 288
 289static int vhost_dev_has_iommu(struct vhost_dev *dev)
 290{
 291    VirtIODevice *vdev = dev->vdev;
 292
 293    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
 294}
 295
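/*
 * With a vIOMMU the addresses programmed into the backend are IOVAs and
 * are used as-is; without one they are guest physical addresses that
 * have to be mapped into QEMU's address space.
 */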
 296static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
 297                              hwaddr *plen, int is_write)
 298{
 299    if (!vhost_dev_has_iommu(dev)) {
 300        return cpu_physical_memory_map(addr, plen, is_write);
 301    } else {
 302        return (void *)(uintptr_t)addr;
 303    }
 304}
 305
 306static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
 307                               hwaddr len, int is_write,
 308                               hwaddr access_len)
 309{
 310    if (!vhost_dev_has_iommu(dev)) {
 311        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
 312    }
 313}
 314
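/*
 * Check that a ring part (descriptor table, avail or used ring) that
 * overlaps [reg_gpa, reg_gpa + reg_size) is still fully contained in
 * the region and still maps to the same host address.
 */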
 315static int vhost_verify_ring_part_mapping(void *ring_hva,
 316                                          uint64_t ring_gpa,
 317                                          uint64_t ring_size,
 318                                          void *reg_hva,
 319                                          uint64_t reg_gpa,
 320                                          uint64_t reg_size)
 321{
 322    uint64_t hva_ring_offset;
 323    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
 324    uint64_t reg_last = range_get_last(reg_gpa, reg_size);
 325
 326    if (ring_last < reg_gpa || ring_gpa > reg_last) {
 327        return 0;
 328    }
  329    /* check that the whole ring is mapped */
 330    if (ring_last > reg_last) {
 331        return -ENOMEM;
 332    }
 333    /* check that ring's MemoryRegion wasn't replaced */
 334    hva_ring_offset = ring_gpa - reg_gpa;
 335    if (ring_hva != reg_hva + hva_ring_offset) {
 336        return -EBUSY;
 337    }
 338
 339    return 0;
 340}
 341
 342static int vhost_verify_ring_mappings(struct vhost_dev *dev,
 343                                      void *reg_hva,
 344                                      uint64_t reg_gpa,
 345                                      uint64_t reg_size)
 346{
 347    int i, j;
 348    int r = 0;
 349    const char *part_name[] = {
 350        "descriptor table",
 351        "available ring",
 352        "used ring"
 353    };
 354
 355    if (vhost_dev_has_iommu(dev)) {
 356        return 0;
 357    }
 358
 359    for (i = 0; i < dev->nvqs; ++i) {
 360        struct vhost_virtqueue *vq = dev->vqs + i;
 361
 362        if (vq->desc_phys == 0) {
 363            continue;
 364        }
 365
 366        j = 0;
 367        r = vhost_verify_ring_part_mapping(
 368                vq->desc, vq->desc_phys, vq->desc_size,
 369                reg_hva, reg_gpa, reg_size);
 370        if (r) {
 371            break;
 372        }
 373
 374        j++;
 375        r = vhost_verify_ring_part_mapping(
 376                vq->avail, vq->avail_phys, vq->avail_size,
 377                reg_hva, reg_gpa, reg_size);
 378        if (r) {
 379            break;
 380        }
 381
 382        j++;
 383        r = vhost_verify_ring_part_mapping(
 384                vq->used, vq->used_phys, vq->used_size,
 385                reg_hva, reg_gpa, reg_size);
 386        if (r) {
 387            break;
 388        }
 389    }
 390
 391    if (r == -ENOMEM) {
 392        error_report("Unable to map %s for ring %d", part_name[j], i);
 393    } else if (r == -EBUSY) {
 394        error_report("%s relocated for ring %d", part_name[j], i);
 395    }
 396    return r;
 397}
 398
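/*
 * Decide whether a memory section is exposed to the vhost backend:
 * plain RAM, not ROM, not under non-migration dirty tracking, and
 * accepted by the backend's own section filter, if any.
 */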
 399static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
 400{
 401    bool result;
 402    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
 403                     ~(1 << DIRTY_MEMORY_MIGRATION);
 404    result = memory_region_is_ram(section->mr) &&
 405        !memory_region_is_rom(section->mr);
 406
 407    /* Vhost doesn't handle any block which is doing dirty-tracking other
 408     * than migration; this typically fires on VGA areas.
 409     */
 410    result &= !log_dirty;
 411
 412    if (result && dev->vhost_ops->vhost_backend_mem_section_filter) {
 413        result &=
 414            dev->vhost_ops->vhost_backend_mem_section_filter(dev, section);
 415    }
 416
 417    trace_vhost_section(section->mr->name, result);
 418    return result;
 419}
 420
 421static void vhost_begin(MemoryListener *listener)
 422{
 423    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
 424                                         memory_listener);
 425    dev->tmp_sections = NULL;
 426    dev->n_tmp_sections = 0;
 427}
 428
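/*
 * Called at the end of a memory transaction: if the collected sections
 * differ from the previous ones, rebuild the vhost memory region table,
 * grow the dirty log if needed and push the new table to the backend.
 */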
 429static void vhost_commit(MemoryListener *listener)
 430{
 431    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
 432                                         memory_listener);
 433    MemoryRegionSection *old_sections;
 434    int n_old_sections;
 435    uint64_t log_size;
 436    size_t regions_size;
 437    int r;
 438    int i;
 439    bool changed = false;
 440
 441    /* Note we can be called before the device is started, but then
 442     * starting the device calls set_mem_table, so we need to have
 443     * built the data structures.
 444     */
 445    old_sections = dev->mem_sections;
 446    n_old_sections = dev->n_mem_sections;
 447    dev->mem_sections = dev->tmp_sections;
 448    dev->n_mem_sections = dev->n_tmp_sections;
 449
 450    if (dev->n_mem_sections != n_old_sections) {
 451        changed = true;
 452    } else {
  453        /* Same size, let's check the contents */
 454        for (int i = 0; i < n_old_sections; i++) {
 455            if (!MemoryRegionSection_eq(&old_sections[i],
 456                                        &dev->mem_sections[i])) {
 457                changed = true;
 458                break;
 459            }
 460        }
 461    }
 462
 463    trace_vhost_commit(dev->started, changed);
 464    if (!changed) {
 465        goto out;
 466    }
 467
 468    /* Rebuild the regions list from the new sections list */
 469    regions_size = offsetof(struct vhost_memory, regions) +
 470                       dev->n_mem_sections * sizeof dev->mem->regions[0];
 471    dev->mem = g_realloc(dev->mem, regions_size);
 472    dev->mem->nregions = dev->n_mem_sections;
 473    used_memslots = dev->mem->nregions;
 474    for (i = 0; i < dev->n_mem_sections; i++) {
 475        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
 476        struct MemoryRegionSection *mrs = dev->mem_sections + i;
 477
 478        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
 479        cur_vmr->memory_size     = int128_get64(mrs->size);
 480        cur_vmr->userspace_addr  =
 481            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
 482            mrs->offset_within_region;
 483        cur_vmr->flags_padding   = 0;
 484    }
 485
 486    if (!dev->started) {
 487        goto out;
 488    }
 489
 490    for (i = 0; i < dev->mem->nregions; i++) {
 491        if (vhost_verify_ring_mappings(dev,
 492                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
 493                       dev->mem->regions[i].guest_phys_addr,
 494                       dev->mem->regions[i].memory_size)) {
 495            error_report("Verify ring failure on region %d", i);
 496            abort();
 497        }
 498    }
 499
 500    if (!dev->log_enabled) {
 501        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
 502        if (r < 0) {
 503            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
 504        }
 505        goto out;
 506    }
 507    log_size = vhost_get_log_size(dev);
  508    /* We allocate an extra 4K bytes of log space
  509     * to reduce the number of reallocations. */
 510#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
 511    /* To log more, must increase log size before table update. */
 512    if (dev->log_size < log_size) {
 513        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
 514    }
 515    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
 516    if (r < 0) {
 517        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
 518    }
 519    /* To log less, can only decrease log size after table update. */
 520    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
 521        vhost_dev_log_resize(dev, log_size);
 522    }
 523
 524out:
  525    /* Unref the old list of sections; this must happen _after_ the
  526     * vhost_set_mem_table call to ensure the client isn't still using
  527     * the sections we're about to unref.
  528     */
 529    while (n_old_sections--) {
 530        memory_region_unref(old_sections[n_old_sections].mr);
 531    }
 532    g_free(old_sections);
 533    return;
 534}
 535
  536/* Adds the section data to the tmp_sections structure.
  537 * It relies on the listener calling us in memory address order
  538 * for each region (via the _add and _nop methods) so that
  539 * neighbouring sections can be joined.
 540 */
 541static void vhost_region_add_section(struct vhost_dev *dev,
 542                                     MemoryRegionSection *section)
 543{
 544    bool need_add = true;
 545    uint64_t mrs_size = int128_get64(section->size);
 546    uint64_t mrs_gpa = section->offset_within_address_space;
 547    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
 548                         section->offset_within_region;
 549    RAMBlock *mrs_rb = section->mr->ram_block;
 550    size_t mrs_page = qemu_ram_pagesize(mrs_rb);
 551
 552    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
 553                                   mrs_host);
 554
  555    /* Round the section to its page size */
 556    /* First align the start down to a page boundary */
 557    uint64_t alignage = mrs_host & (mrs_page - 1);
 558    if (alignage) {
 559        mrs_host -= alignage;
 560        mrs_size += alignage;
 561        mrs_gpa  -= alignage;
 562    }
 563    /* Now align the size up to a page boundary */
 564    alignage = mrs_size & (mrs_page - 1);
 565    if (alignage) {
 566        mrs_size += mrs_page - alignage;
 567    }
 568    trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa, mrs_size,
 569                                           mrs_host);
 570
 571    if (dev->n_tmp_sections) {
  572        /* Since we already have at least one section, let's see if
 573         * this extends it; since we're scanning in order, we only
 574         * have to look at the last one, and the FlatView that calls
 575         * us shouldn't have overlaps.
 576         */
 577        MemoryRegionSection *prev_sec = dev->tmp_sections +
 578                                               (dev->n_tmp_sections - 1);
 579        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
 580        uint64_t prev_size = int128_get64(prev_sec->size);
 581        uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
 582        uint64_t prev_host_start =
 583                        (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
 584                        prev_sec->offset_within_region;
 585        uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);
 586
 587        if (mrs_gpa <= (prev_gpa_end + 1)) {
 588            /* OK, looks like overlapping/intersecting - it's possible that
 589             * the rounding to page sizes has made them overlap, but they should
 590             * match up in the same RAMBlock if they do.
 591             */
 592            if (mrs_gpa < prev_gpa_start) {
 593                error_report("%s:Section rounded to %"PRIx64
 594                             " prior to previous %"PRIx64,
 595                             __func__, mrs_gpa, prev_gpa_start);
 596                /* A way to cleanly fail here would be better */
 597                return;
 598            }
 599            /* Offset from the start of the previous GPA to this GPA */
 600            size_t offset = mrs_gpa - prev_gpa_start;
 601
 602            if (prev_host_start + offset == mrs_host &&
 603                section->mr == prev_sec->mr &&
 604                (!dev->vhost_ops->vhost_backend_can_merge ||
 605                 dev->vhost_ops->vhost_backend_can_merge(dev,
 606                    mrs_host, mrs_size,
 607                    prev_host_start, prev_size))) {
 608                uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
 609                need_add = false;
 610                prev_sec->offset_within_address_space =
 611                    MIN(prev_gpa_start, mrs_gpa);
 612                prev_sec->offset_within_region =
 613                    MIN(prev_host_start, mrs_host) -
 614                    (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
 615                prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
 616                                               mrs_host));
 617                trace_vhost_region_add_section_merge(section->mr->name,
 618                                        int128_get64(prev_sec->size),
 619                                        prev_sec->offset_within_address_space,
 620                                        prev_sec->offset_within_region);
 621            } else {
 622                /* adjoining regions are fine, but overlapping ones with
 623                 * different blocks/offsets shouldn't happen
 624                 */
 625                if (mrs_gpa != prev_gpa_end + 1) {
 626                    error_report("%s: Overlapping but not coherent sections "
 627                                 "at %"PRIx64,
 628                                 __func__, mrs_gpa);
 629                    return;
 630                }
 631            }
 632        }
 633    }
 634
 635    if (need_add) {
 636        ++dev->n_tmp_sections;
 637        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
 638                                    dev->n_tmp_sections);
 639        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
 640        /* The flatview isn't stable and we don't use it, making it NULL
 641         * means we can memcmp the list.
 642         */
 643        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
 644        memory_region_ref(section->mr);
 645    }
 646}
 647
 648/* Used for both add and nop callbacks */
 649static void vhost_region_addnop(MemoryListener *listener,
 650                                MemoryRegionSection *section)
 651{
 652    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
 653                                         memory_listener);
 654
 655    if (!vhost_section(dev, section)) {
 656        return;
 657    }
 658    vhost_region_add_section(dev, section);
 659}
 660
 661static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 662{
 663    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
 664    struct vhost_dev *hdev = iommu->hdev;
 665    hwaddr iova = iotlb->iova + iommu->iommu_offset;
 666
 667    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
 668                                              iotlb->addr_mask + 1)) {
  669        error_report("Failed to invalidate device iotlb");
 670    }
 671}
 672
 673static void vhost_iommu_region_add(MemoryListener *listener,
 674                                   MemoryRegionSection *section)
 675{
 676    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
 677                                         iommu_listener);
 678    struct vhost_iommu *iommu;
 679    Int128 end;
 680    int iommu_idx;
 681    IOMMUMemoryRegion *iommu_mr;
 682
 683    if (!memory_region_is_iommu(section->mr)) {
 684        return;
 685    }
 686
 687    iommu_mr = IOMMU_MEMORY_REGION(section->mr);
 688
 689    iommu = g_malloc0(sizeof(*iommu));
 690    end = int128_add(int128_make64(section->offset_within_region),
 691                     section->size);
 692    end = int128_sub(end, int128_one());
 693    iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
 694                                                   MEMTXATTRS_UNSPECIFIED);
 695    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
 696                        IOMMU_NOTIFIER_UNMAP,
 697                        section->offset_within_region,
 698                        int128_get64(end),
 699                        iommu_idx);
 700    iommu->mr = section->mr;
 701    iommu->iommu_offset = section->offset_within_address_space -
 702                          section->offset_within_region;
 703    iommu->hdev = dev;
 704    memory_region_register_iommu_notifier(section->mr, &iommu->n);
 705    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
 706    /* TODO: can replay help performance here? */
 707}
 708
 709static void vhost_iommu_region_del(MemoryListener *listener,
 710                                   MemoryRegionSection *section)
 711{
 712    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
 713                                         iommu_listener);
 714    struct vhost_iommu *iommu;
 715
 716    if (!memory_region_is_iommu(section->mr)) {
 717        return;
 718    }
 719
 720    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
 721        if (iommu->mr == section->mr &&
 722            iommu->n.start == section->offset_within_region) {
 723            memory_region_unregister_iommu_notifier(iommu->mr,
 724                                                    &iommu->n);
 725            QLIST_REMOVE(iommu, iommu_next);
 726            g_free(iommu);
 727            break;
 728        }
 729    }
 730}
 731
 732static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
 733                                    struct vhost_virtqueue *vq,
 734                                    unsigned idx, bool enable_log)
 735{
 736    struct vhost_vring_addr addr = {
 737        .index = idx,
 738        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
 739        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
 740        .used_user_addr = (uint64_t)(unsigned long)vq->used,
 741        .log_guest_addr = vq->used_phys,
 742        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
 743    };
 744    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
 745    if (r < 0) {
 746        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
 747        return -errno;
 748    }
 749    return 0;
 750}
 751
 752static int vhost_dev_set_features(struct vhost_dev *dev,
 753                                  bool enable_log)
 754{
 755    uint64_t features = dev->acked_features;
 756    int r;
 757    if (enable_log) {
 758        features |= 0x1ULL << VHOST_F_LOG_ALL;
 759    }
 760    r = dev->vhost_ops->vhost_set_features(dev, features);
 761    if (r < 0) {
 762        VHOST_OPS_DEBUG("vhost_set_features failed");
 763    }
 764    return r < 0 ? -errno : 0;
 765}
 766
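/*
 * Renegotiate VHOST_F_LOG_ALL with the backend and reprogram every
 * vring's addresses so that their logging flag matches the new state.
 */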
 767static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
 768{
 769    int r, i, idx;
 770    r = vhost_dev_set_features(dev, enable_log);
 771    if (r < 0) {
 772        goto err_features;
 773    }
 774    for (i = 0; i < dev->nvqs; ++i) {
 775        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
 776        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
 777                                     enable_log);
 778        if (r < 0) {
 779            goto err_vq;
 780        }
 781    }
 782    return 0;
 783err_vq:
 784    for (; i >= 0; --i) {
 785        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
 786        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
 787                                 dev->log_enabled);
 788    }
 789    vhost_dev_set_features(dev, dev->log_enabled);
 790err_features:
 791    return r;
 792}
 793
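/*
 * Enable or disable dirty-page logging on a running device; when the
 * device is not started, only the desired state is recorded.
 */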
 794static int vhost_migration_log(MemoryListener *listener, int enable)
 795{
 796    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
 797                                         memory_listener);
 798    int r;
 799    if (!!enable == dev->log_enabled) {
 800        return 0;
 801    }
 802    if (!dev->started) {
 803        dev->log_enabled = enable;
 804        return 0;
 805    }
 806    if (!enable) {
 807        r = vhost_dev_set_log(dev, false);
 808        if (r < 0) {
 809            return r;
 810        }
 811        vhost_log_put(dev, false);
 812    } else {
 813        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
 814        r = vhost_dev_set_log(dev, true);
 815        if (r < 0) {
 816            return r;
 817        }
 818    }
 819    dev->log_enabled = enable;
 820    return 0;
 821}
 822
 823static void vhost_log_global_start(MemoryListener *listener)
 824{
 825    int r;
 826
 827    r = vhost_migration_log(listener, true);
 828    if (r < 0) {
 829        abort();
 830    }
 831}
 832
 833static void vhost_log_global_stop(MemoryListener *listener)
 834{
 835    int r;
 836
 837    r = vhost_migration_log(listener, false);
 838    if (r < 0) {
 839        abort();
 840    }
 841}
 842
 843static void vhost_log_start(MemoryListener *listener,
 844                            MemoryRegionSection *section,
 845                            int old, int new)
 846{
 847    /* FIXME: implement */
 848}
 849
 850static void vhost_log_stop(MemoryListener *listener,
 851                           MemoryRegionSection *section,
 852                           int old, int new)
 853{
 854    /* FIXME: implement */
 855}
 856
 857/* The vhost driver natively knows how to handle the vrings of non
 858 * cross-endian legacy devices and modern devices. Only legacy devices
 859 * exposed to a bi-endian guest may require the vhost driver to use a
 860 * specific endianness.
 861 */
 862static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
 863{
 864    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
 865        return false;
 866    }
 867#ifdef HOST_WORDS_BIGENDIAN
 868    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
 869#else
 870    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
 871#endif
 872}
 873
 874static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
 875                                                   bool is_big_endian,
 876                                                   int vhost_vq_index)
 877{
 878    struct vhost_vring_state s = {
 879        .index = vhost_vq_index,
 880        .num = is_big_endian
 881    };
 882
 883    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
 884        return 0;
 885    }
 886
 887    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
 888    if (errno == ENOTTY) {
 889        error_report("vhost does not support cross-endian");
 890        return -ENOSYS;
 891    }
 892
 893    return -errno;
 894}
 895
 896static int vhost_memory_region_lookup(struct vhost_dev *hdev,
 897                                      uint64_t gpa, uint64_t *uaddr,
 898                                      uint64_t *len)
 899{
 900    int i;
 901
 902    for (i = 0; i < hdev->mem->nregions; i++) {
 903        struct vhost_memory_region *reg = hdev->mem->regions + i;
 904
 905        if (gpa >= reg->guest_phys_addr &&
 906            reg->guest_phys_addr + reg->memory_size > gpa) {
 907            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
 908            *len = reg->guest_phys_addr + reg->memory_size - gpa;
 909            return 0;
 910        }
 911    }
 912
 913    return -EFAULT;
 914}
 915
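/*
 * Handle an IOTLB miss reported by the backend: translate the IOVA
 * through the device's DMA address space and push the resulting
 * mapping back with vhost_backend_update_device_iotlb().
 */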
 916int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
 917{
 918    IOMMUTLBEntry iotlb;
 919    uint64_t uaddr, len;
 920    int ret = -EFAULT;
 921
 922    rcu_read_lock();
 923
 924    trace_vhost_iotlb_miss(dev, 1);
 925
 926    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
 927                                          iova, write,
 928                                          MEMTXATTRS_UNSPECIFIED);
 929    if (iotlb.target_as != NULL) {
 930        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
 931                                         &uaddr, &len);
 932        if (ret) {
 933            trace_vhost_iotlb_miss(dev, 3);
  934            error_report("Failed to look up the translated address "
 935                         "%"PRIx64, iotlb.translated_addr);
 936            goto out;
 937        }
 938
 939        len = MIN(iotlb.addr_mask + 1, len);
 940        iova = iova & ~iotlb.addr_mask;
 941
 942        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
 943                                                len, iotlb.perm);
 944        if (ret) {
 945            trace_vhost_iotlb_miss(dev, 4);
  946            error_report("Failed to update device iotlb");
 947            goto out;
 948        }
 949    }
 950
 951    trace_vhost_iotlb_miss(dev, 2);
 952
 953out:
 954    rcu_read_unlock();
 955
 956    return ret;
 957}
 958
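/*
 * Program one virtqueue into the backend: ring size, last avail index,
 * (legacy) vring endianness, ring host addresses and the kick eventfd,
 * and unmask it unless the transport masks guest notifiers itself.
 */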
 959static int vhost_virtqueue_start(struct vhost_dev *dev,
 960                                struct VirtIODevice *vdev,
 961                                struct vhost_virtqueue *vq,
 962                                unsigned idx)
 963{
 964    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
 965    VirtioBusState *vbus = VIRTIO_BUS(qbus);
 966    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
 967    hwaddr s, l, a;
 968    int r;
 969    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
 970    struct vhost_vring_file file = {
 971        .index = vhost_vq_index
 972    };
 973    struct vhost_vring_state state = {
 974        .index = vhost_vq_index
 975    };
 976    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
 977
 978    a = virtio_queue_get_desc_addr(vdev, idx);
 979    if (a == 0) {
 980        /* Queue might not be ready for start */
 981        return 0;
 982    }
 983
 984    vq->num = state.num = virtio_queue_get_num(vdev, idx);
 985    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
 986    if (r) {
 987        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
 988        return -errno;
 989    }
 990
 991    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
 992    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
 993    if (r) {
 994        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
 995        return -errno;
 996    }
 997
 998    if (vhost_needs_vring_endian(vdev)) {
 999        r = vhost_virtqueue_set_vring_endian_legacy(dev,
1000                                                    virtio_is_big_endian(vdev),
1001                                                    vhost_vq_index);
1002        if (r) {
1003            return -errno;
1004        }
1005    }
1006
1007    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
1008    vq->desc_phys = a;
1009    vq->desc = vhost_memory_map(dev, a, &l, 0);
1010    if (!vq->desc || l != s) {
1011        r = -ENOMEM;
1012        goto fail_alloc_desc;
1013    }
1014    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
1015    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
1016    vq->avail = vhost_memory_map(dev, a, &l, 0);
1017    if (!vq->avail || l != s) {
1018        r = -ENOMEM;
1019        goto fail_alloc_avail;
1020    }
1021    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
1022    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1023    vq->used = vhost_memory_map(dev, a, &l, 1);
1024    if (!vq->used || l != s) {
1025        r = -ENOMEM;
1026        goto fail_alloc_used;
1027    }
1028
1029    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1030    if (r < 0) {
1031        r = -errno;
1032        goto fail_alloc;
1033    }
1034
1035    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1036    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1037    if (r) {
1038        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
1039        r = -errno;
1040        goto fail_kick;
1041    }
1042
1043    /* Clear and discard previous events if any. */
1044    event_notifier_test_and_clear(&vq->masked_notifier);
1045
1046    /* Init vring in unmasked state, unless guest_notifier_mask
1047     * will do it later.
1048     */
1049    if (!vdev->use_guest_notifier_mask) {
1050        /* TODO: check and handle errors. */
1051        vhost_virtqueue_mask(dev, vdev, idx, false);
1052    }
1053
1054    if (k->query_guest_notifiers &&
1055        k->query_guest_notifiers(qbus->parent) &&
1056        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1057        file.fd = -1;
1058        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1059        if (r) {
1060            goto fail_vector;
1061        }
1062    }
1063
1064    return 0;
1065
1066fail_vector:
1067fail_kick:
1068fail_alloc:
1069    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1070                       0, 0);
1071fail_alloc_used:
1072    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1073                       0, 0);
1074fail_alloc_avail:
1075    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1076                       0, 0);
1077fail_alloc_desc:
1078    return r;
1079}
1080
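/*
 * Tear down one virtqueue: fetch the last avail index back from the
 * backend (or restore it from the used index if the backend is gone),
 * reset legacy endianness if needed and unmap the rings.
 */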
1081static void vhost_virtqueue_stop(struct vhost_dev *dev,
1082                                    struct VirtIODevice *vdev,
1083                                    struct vhost_virtqueue *vq,
1084                                    unsigned idx)
1085{
1086    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1087    struct vhost_vring_state state = {
1088        .index = vhost_vq_index,
1089    };
1090    int r;
1091
1092    if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
 1093        /* Don't stop a virtqueue that might not have been started */
1094        return;
1095    }
1096
1097    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1098    if (r < 0) {
1099        VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r);
 1100        /* Connection to the backend is broken, so let's sync the internal
 1101         * last avail idx to the device's used idx.
1102         */
1103        virtio_queue_restore_last_avail_idx(vdev, idx);
1104    } else {
1105        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1106    }
1107    virtio_queue_invalidate_signalled_used(vdev, idx);
1108    virtio_queue_update_used_idx(vdev, idx);
1109
1110    /* In the cross-endian case, we need to reset the vring endianness to
1111     * native as legacy devices expect so by default.
1112     */
1113    if (vhost_needs_vring_endian(vdev)) {
1114        vhost_virtqueue_set_vring_endian_legacy(dev,
1115                                                !virtio_is_big_endian(vdev),
1116                                                vhost_vq_index);
1117    }
1118
1119    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1120                       1, virtio_queue_get_used_size(vdev, idx));
1121    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1122                       0, virtio_queue_get_avail_size(vdev, idx));
1123    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1124                       0, virtio_queue_get_desc_size(vdev, idx));
1125}
1126
1127static void vhost_eventfd_add(MemoryListener *listener,
1128                              MemoryRegionSection *section,
1129                              bool match_data, uint64_t data, EventNotifier *e)
1130{
1131}
1132
1133static void vhost_eventfd_del(MemoryListener *listener,
1134                              MemoryRegionSection *section,
1135                              bool match_data, uint64_t data, EventNotifier *e)
1136{
1137}
1138
1139static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1140                                                int n, uint32_t timeout)
1141{
1142    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1143    struct vhost_vring_state state = {
1144        .index = vhost_vq_index,
1145        .num = timeout,
1146    };
1147    int r;
1148
1149    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1150        return -EINVAL;
1151    }
1152
1153    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1154    if (r) {
1155        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
1156        return r;
1157    }
1158
1159    return 0;
1160}
1161
1162static int vhost_virtqueue_init(struct vhost_dev *dev,
1163                                struct vhost_virtqueue *vq, int n)
1164{
1165    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1166    struct vhost_vring_file file = {
1167        .index = vhost_vq_index,
1168    };
1169    int r = event_notifier_init(&vq->masked_notifier, 0);
1170    if (r < 0) {
1171        return r;
1172    }
1173
1174    file.fd = event_notifier_get_fd(&vq->masked_notifier);
1175    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1176    if (r) {
1177        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1178        r = -errno;
1179        goto fail_call;
1180    }
1181
1182    vq->dev = dev;
1183
1184    return 0;
1185fail_call:
1186    event_notifier_cleanup(&vq->masked_notifier);
1187    return r;
1188}
1189
1190static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1191{
1192    event_notifier_cleanup(&vq->masked_notifier);
1193}
1194
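/*
 * Set up a vhost device: bind the backend, query its features,
 * initialize the virtqueues' masked notifiers, install the memory
 * listeners and add a migration blocker when dirty logging is not
 * available.
 *
 * Rough usage sketch (illustrative only; the exact wiring depends on
 * the virtio device and backend, and error handling is omitted):
 *
 *     struct vhost_dev dev = { .nvqs = nvqs, .vqs = vqs };
 *     vhost_dev_init(&dev, opaque, VHOST_BACKEND_TYPE_KERNEL, 0);
 *     vhost_dev_enable_notifiers(&dev, vdev);
 *     vhost_dev_start(&dev, vdev);
 *     ...
 *     vhost_dev_stop(&dev, vdev);
 *     vhost_dev_disable_notifiers(&dev, vdev);
 *     vhost_dev_cleanup(&dev);
 */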
1195int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1196                   VhostBackendType backend_type, uint32_t busyloop_timeout)
1197{
1198    uint64_t features;
1199    int i, r, n_initialized_vqs = 0;
1200    Error *local_err = NULL;
1201
1202    hdev->vdev = NULL;
1203    hdev->migration_blocker = NULL;
1204
1205    r = vhost_set_backend_type(hdev, backend_type);
1206    assert(r >= 0);
1207
1208    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
1209    if (r < 0) {
1210        goto fail;
1211    }
1212
1213    r = hdev->vhost_ops->vhost_set_owner(hdev);
1214    if (r < 0) {
1215        VHOST_OPS_DEBUG("vhost_set_owner failed");
1216        goto fail;
1217    }
1218
1219    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1220    if (r < 0) {
1221        VHOST_OPS_DEBUG("vhost_get_features failed");
1222        goto fail;
1223    }
1224
1225    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1226        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1227        if (r < 0) {
1228            goto fail;
1229        }
1230    }
1231
1232    if (busyloop_timeout) {
1233        for (i = 0; i < hdev->nvqs; ++i) {
1234            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1235                                                     busyloop_timeout);
1236            if (r < 0) {
1237                goto fail_busyloop;
1238            }
1239        }
1240    }
1241
1242    hdev->features = features;
1243
1244    hdev->memory_listener = (MemoryListener) {
1245        .begin = vhost_begin,
1246        .commit = vhost_commit,
1247        .region_add = vhost_region_addnop,
1248        .region_nop = vhost_region_addnop,
1249        .log_start = vhost_log_start,
1250        .log_stop = vhost_log_stop,
1251        .log_sync = vhost_log_sync,
1252        .log_global_start = vhost_log_global_start,
1253        .log_global_stop = vhost_log_global_stop,
1254        .eventfd_add = vhost_eventfd_add,
1255        .eventfd_del = vhost_eventfd_del,
1256        .priority = 10
1257    };
1258
1259    hdev->iommu_listener = (MemoryListener) {
1260        .region_add = vhost_iommu_region_add,
1261        .region_del = vhost_iommu_region_del,
1262    };
1263
1264    if (hdev->migration_blocker == NULL) {
1265        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1266            error_setg(&hdev->migration_blocker,
1267                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1268        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
1269            error_setg(&hdev->migration_blocker,
1270                       "Migration disabled: failed to allocate shared memory");
1271        }
1272    }
1273
1274    if (hdev->migration_blocker != NULL) {
1275        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
1276        if (local_err) {
1277            error_report_err(local_err);
1278            error_free(hdev->migration_blocker);
1279            goto fail_busyloop;
1280        }
1281    }
1282
1283    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1284    hdev->n_mem_sections = 0;
1285    hdev->mem_sections = NULL;
1286    hdev->log = NULL;
1287    hdev->log_size = 0;
1288    hdev->log_enabled = false;
1289    hdev->started = false;
1290    memory_listener_register(&hdev->memory_listener, &address_space_memory);
1291    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1292
1293    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
 1294        error_report("vhost backend memory slots limit is less"
 1295                " than the current number of present memory slots");
1296        r = -1;
1297        if (busyloop_timeout) {
1298            goto fail_busyloop;
1299        } else {
1300            goto fail;
1301        }
1302    }
1303
1304    return 0;
1305
1306fail_busyloop:
1307    while (--i >= 0) {
1308        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1309    }
1310fail:
1311    hdev->nvqs = n_initialized_vqs;
1312    vhost_dev_cleanup(hdev);
1313    return r;
1314}
1315
1316void vhost_dev_cleanup(struct vhost_dev *hdev)
1317{
1318    int i;
1319
1320    for (i = 0; i < hdev->nvqs; ++i) {
1321        vhost_virtqueue_cleanup(hdev->vqs + i);
1322    }
1323    if (hdev->mem) {
 1324        /* these are only safe after a successful init */
1325        memory_listener_unregister(&hdev->memory_listener);
1326        QLIST_REMOVE(hdev, entry);
1327    }
1328    if (hdev->migration_blocker) {
1329        migrate_del_blocker(hdev->migration_blocker);
1330        error_free(hdev->migration_blocker);
1331    }
1332    g_free(hdev->mem);
1333    g_free(hdev->mem_sections);
1334    if (hdev->vhost_ops) {
1335        hdev->vhost_ops->vhost_backend_cleanup(hdev);
1336    }
1337    assert(!hdev->log);
1338
1339    memset(hdev, 0, sizeof(struct vhost_dev));
1340}
1341
1342/* Stop processing guest IO notifications in qemu.
1343 * Start processing them in vhost in kernel.
1344 */
1345int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1346{
1347    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1348    int i, r, e;
1349
 1350    /* We will pass the notifiers to the kernel; make sure that QEMU
1351     * doesn't interfere.
1352     */
1353    r = virtio_device_grab_ioeventfd(vdev);
1354    if (r < 0) {
1355        error_report("binding does not support host notifiers");
1356        goto fail;
1357    }
1358
1359    for (i = 0; i < hdev->nvqs; ++i) {
1360        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1361                                         true);
1362        if (r < 0) {
1363            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1364            goto fail_vq;
1365        }
1366    }
1367
1368    return 0;
1369fail_vq:
1370    while (--i >= 0) {
1371        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1372                                         false);
1373        if (e < 0) {
 1374            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
1375        }
 1376        assert(e >= 0);
1377        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1378    }
1379    virtio_device_release_ioeventfd(vdev);
1380fail:
1381    return r;
1382}
1383
1384/* Stop processing guest IO notifications in vhost.
1385 * Start processing them in qemu.
1386 * This might actually run the qemu handlers right away,
1387 * so virtio in qemu must be completely setup when this is called.
1388 */
1389void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1390{
1391    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1392    int i, r;
1393
1394    for (i = 0; i < hdev->nvqs; ++i) {
1395        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1396                                         false);
1397        if (r < 0) {
1398            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1399        }
 1400        assert(r >= 0);
1401        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1402    }
1403    virtio_device_release_ioeventfd(vdev);
1404}
1405
1406/* Test and clear event pending status.
1407 * Should be called after unmask to avoid losing events.
1408 */
1409bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1410{
1411    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1412    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1413    return event_notifier_test_and_clear(&vq->masked_notifier);
1414}
1415
1416/* Mask/unmask events from this vq. */
1417void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1418                         bool mask)
1419{
1420    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1421    int r, index = n - hdev->vq_index;
1422    struct vhost_vring_file file;
1423
1424    /* should only be called after backend is connected */
1425    assert(hdev->vhost_ops);
1426
1427    if (mask) {
1428        assert(vdev->use_guest_notifier_mask);
1429        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
1430    } else {
1431        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
1432    }
1433
1434    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1435    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1436    if (r < 0) {
1437        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
1438    }
1439}
1440
1441uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1442                            uint64_t features)
1443{
1444    const int *bit = feature_bits;
1445    while (*bit != VHOST_INVALID_FEATURE_BIT) {
1446        uint64_t bit_mask = (1ULL << *bit);
1447        if (!(hdev->features & bit_mask)) {
1448            features &= ~bit_mask;
1449        }
1450        bit++;
1451    }
1452    return features;
1453}
1454
1455void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1456                        uint64_t features)
1457{
1458    const int *bit = feature_bits;
1459    while (*bit != VHOST_INVALID_FEATURE_BIT) {
1460        uint64_t bit_mask = (1ULL << *bit);
1461        if (features & bit_mask) {
1462            hdev->acked_features |= bit_mask;
1463        }
1464        bit++;
1465    }
1466}
1467
1468int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
1469                         uint32_t config_len)
1470{
1471    assert(hdev->vhost_ops);
1472
1473    if (hdev->vhost_ops->vhost_get_config) {
1474        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
1475    }
1476
1477    return -1;
1478}
1479
1480int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
1481                         uint32_t offset, uint32_t size, uint32_t flags)
1482{
1483    assert(hdev->vhost_ops);
1484
1485    if (hdev->vhost_ops->vhost_set_config) {
1486        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
1487                                                 size, flags);
1488    }
1489
1490    return -1;
1491}
1492
1493void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
1494                                   const VhostDevConfigOps *ops)
1495{
1496    hdev->config_ops = ops;
1497}
1498
1499void vhost_dev_free_inflight(struct vhost_inflight *inflight)
1500{
1501    if (inflight->addr) {
1502        qemu_memfd_free(inflight->addr, inflight->size, inflight->fd);
1503        inflight->addr = NULL;
1504        inflight->fd = -1;
1505    }
1506}
1507
1508static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
1509                                     uint64_t new_size)
1510{
1511    Error *err = NULL;
1512    int fd = -1;
1513    void *addr = qemu_memfd_alloc("vhost-inflight", new_size,
1514                                  F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1515                                  &fd, &err);
1516
1517    if (err) {
1518        error_report_err(err);
1519        return -1;
1520    }
1521
1522    vhost_dev_free_inflight(inflight);
1523    inflight->offset = 0;
1524    inflight->addr = addr;
1525    inflight->fd = fd;
1526    inflight->size = new_size;
1527
1528    return 0;
1529}
1530
1531void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1532{
1533    if (inflight->addr) {
1534        qemu_put_be64(f, inflight->size);
1535        qemu_put_be16(f, inflight->queue_size);
1536        qemu_put_buffer(f, inflight->addr, inflight->size);
1537    } else {
1538        qemu_put_be64(f, 0);
1539    }
1540}
1541
1542int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1543{
1544    uint64_t size;
1545
1546    size = qemu_get_be64(f);
1547    if (!size) {
1548        return 0;
1549    }
1550
1551    if (inflight->size != size) {
1552        if (vhost_dev_resize_inflight(inflight, size)) {
1553            return -1;
1554        }
1555    }
1556    inflight->queue_size = qemu_get_be16(f);
1557
1558    qemu_get_buffer(f, inflight->addr, size);
1559
1560    return 0;
1561}
1562
1563int vhost_dev_set_inflight(struct vhost_dev *dev,
1564                           struct vhost_inflight *inflight)
1565{
1566    int r;
1567
1568    if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
1569        r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
1570        if (r) {
1571            VHOST_OPS_DEBUG("vhost_set_inflight_fd failed");
1572            return -errno;
1573        }
1574    }
1575
1576    return 0;
1577}
1578
1579int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
1580                           struct vhost_inflight *inflight)
1581{
1582    int r;
1583
1584    if (dev->vhost_ops->vhost_get_inflight_fd) {
1585        r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
1586        if (r) {
1587            VHOST_OPS_DEBUG("vhost_get_inflight_fd failed");
1588            return -errno;
1589        }
1590    }
1591
1592    return 0;
1593}
1594
1595/* Host notifiers must be enabled at this point. */
1596int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
1597{
1598    int i, r;
1599
1600    /* should only be called after backend is connected */
1601    assert(hdev->vhost_ops);
1602
1603    hdev->started = true;
1604    hdev->vdev = vdev;
1605
1606    r = vhost_dev_set_features(hdev, hdev->log_enabled);
1607    if (r < 0) {
1608        goto fail_features;
1609    }
1610
1611    if (vhost_dev_has_iommu(hdev)) {
1612        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
1613    }
1614
1615    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1616    if (r < 0) {
1617        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
1618        r = -errno;
1619        goto fail_mem;
1620    }
1621    for (i = 0; i < hdev->nvqs; ++i) {
1622        r = vhost_virtqueue_start(hdev,
1623                                  vdev,
1624                                  hdev->vqs + i,
1625                                  hdev->vq_index + i);
1626        if (r < 0) {
1627            goto fail_vq;
1628        }
1629    }
1630
1631    if (hdev->log_enabled) {
1632        uint64_t log_base;
1633
1634        hdev->log_size = vhost_get_log_size(hdev);
1635        hdev->log = vhost_log_get(hdev->log_size,
1636                                  vhost_dev_log_is_shared(hdev));
1637        log_base = (uintptr_t)hdev->log->log;
1638        r = hdev->vhost_ops->vhost_set_log_base(hdev,
1639                                                hdev->log_size ? log_base : 0,
1640                                                hdev->log);
1641        if (r < 0) {
1642            VHOST_OPS_DEBUG("vhost_set_log_base failed");
1643            r = -errno;
1644            goto fail_log;
1645        }
1646    }
1647
1648    if (vhost_dev_has_iommu(hdev)) {
1649        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);
1650
 1651        /* Update used ring information for IOTLB to work correctly;
 1652         * the vhost-kernel code requires this. */
1653        for (i = 0; i < hdev->nvqs; ++i) {
1654            struct vhost_virtqueue *vq = hdev->vqs + i;
1655            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
1656        }
1657    }
1658    return 0;
1659fail_log:
1660    vhost_log_put(hdev, false);
1661fail_vq:
1662    while (--i >= 0) {
1663        vhost_virtqueue_stop(hdev,
1664                             vdev,
1665                             hdev->vqs + i,
1666                             hdev->vq_index + i);
1667    }
1668
1669fail_mem:
1670fail_features:
1671
1672    hdev->started = false;
1673    return r;
1674}
1675
1676/* Host notifiers must be enabled at this point. */
1677void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
1678{
1679    int i;
1680
1681    /* should only be called after backend is connected */
1682    assert(hdev->vhost_ops);
1683
1684    for (i = 0; i < hdev->nvqs; ++i) {
1685        vhost_virtqueue_stop(hdev,
1686                             vdev,
1687                             hdev->vqs + i,
1688                             hdev->vq_index + i);
1689    }
1690
1691    if (vhost_dev_has_iommu(hdev)) {
1692        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
1693        memory_listener_unregister(&hdev->iommu_listener);
1694    }
1695    vhost_log_put(hdev, true);
1696    hdev->started = false;
1697    hdev->vdev = NULL;
1698}
1699
1700int vhost_net_set_backend(struct vhost_dev *hdev,
1701                          struct vhost_vring_file *file)
1702{
1703    if (hdev->vhost_ops->vhost_net_set_backend) {
1704        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
1705    }
1706
1707    return -1;
1708}
1709