qemu/hw/virtio/vhost.c
/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "migration/blocker.h"
#include "sysemu/dma.h"
#include "trace.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                      strerror(errno), errno); } while (0)
#else
#define VHOST_OPS_DEBUG(fmt, ...) \
    do { } while (0)
#endif

static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

static unsigned int used_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

bool vhost_has_free_slot(void)
{
    unsigned int slots_limit = ~0U;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        slots_limit = MIN(slots_limit, r);
    }
    return slots_limit > used_memslots;
}

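/* Mark the guest pages recorded in the dirty log as dirty in QEMU's own
 * bitmap.  [mfirst, mlast] is the window of the MemoryRegionSection being
 * synced and [rfirst, rlast] the window of a vhost memory region or used
 * ring; only their intersection is scanned.
 */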
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (;from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = atomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

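/* Sync the dirty log into QEMU's dirty bitmap for the part of @section
 * that falls within [first, last], covering every vhost memory region
 * and every used ring the device writes to.
 */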
static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

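/* Compute how many log chunks are needed to cover the highest guest
 * physical address used by any memory region or used ring.
 */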
static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

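/* Return the cached dirty log (shared-memory or anonymous) of the
 * requested size, allocating a new one if none is cached or the size
 * differs, otherwise taking an extra reference on the existing log.
 */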
static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

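/* Switch the device to a dirty log of @size chunks: tell the backend
 * about the new log first, then drop the reference to the old one, so
 * that no write goes unlogged during the transition.
 */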
static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static int vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, int is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}

static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    if (vhost_dev_has_iommu(dev)) {
        return 0;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (vq->desc_phys == 0) {
            continue;
        }

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

static bool vhost_section(MemoryRegionSection *section)
{
    bool result;
    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
                     ~(1 << DIRTY_MEMORY_MIGRATION);
    result = memory_region_is_ram(section->mr) &&
        !memory_region_is_rom(section->mr);

    /* Vhost doesn't handle any block which is doing dirty-tracking other
     * than migration; this typically fires on VGA areas.
     */
    result &= !log_dirty;

    trace_vhost_section(section->mr->name, result);
    return result;
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}

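/* Commit the section list built during this listener transaction: if it
 * differs from the previous one, rebuild the vhost memory table, verify
 * that already-mapped rings are still valid, and push the new table to
 * the backend (resizing the dirty log first when logging is enabled).
 */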
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    size_t regions_size;
    int r;
    int i;
    bool changed = false;

    /* Note we can be called before the device is started, but then
     * starting the device calls set_mem_table, so we need to have
     * built the data structures.
     */
    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (dev->n_mem_sections != n_old_sections) {
        changed = true;
    } else {
        /* Same size, let's check the contents */
        changed = n_old_sections && memcmp(dev->mem_sections, old_sections,
                         n_old_sections * sizeof(old_sections[0])) != 0;
    }

    trace_vhost_commit(dev->started, changed);
    if (!changed) {
        goto out;
    }

    /* Rebuild the regions list from the new sections list */
    regions_size = offsetof(struct vhost_memory, regions) +
                       dev->n_mem_sections * sizeof dev->mem->regions[0];
    dev->mem = g_realloc(dev->mem, regions_size);
    dev->mem->nregions = dev->n_mem_sections;
    used_memslots = dev->mem->nregions;
    for (i = 0; i < dev->n_mem_sections; i++) {
        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
        struct MemoryRegionSection *mrs = dev->mem_sections + i;

        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
        cur_vmr->memory_size     = int128_get64(mrs->size);
        cur_vmr->userspace_addr  =
            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
            mrs->offset_within_region;
        cur_vmr->flags_padding   = 0;
    }

    if (!dev->started) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        }
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
    return;
}

/* Adds the section data to the tmp_section structure.
 * It relies on the listener calling us in memory address order
 * and for each region (via the _add and _nop methods) to
 * join neighbours.
 */
static void vhost_region_add_section(struct vhost_dev *dev,
                                     MemoryRegionSection *section)
{
    bool need_add = true;
    uint64_t mrs_size = int128_get64(section->size);
    uint64_t mrs_gpa = section->offset_within_address_space;
    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    RAMBlock *mrs_rb = section->mr->ram_block;
    size_t mrs_page = qemu_ram_pagesize(mrs_rb);

    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
                                   mrs_host);

    /* Round the section to its page size */
    /* First align the start down to a page boundary */
    uint64_t alignage = mrs_host & (mrs_page - 1);
    if (alignage) {
        mrs_host -= alignage;
        mrs_size += alignage;
        mrs_gpa  -= alignage;
    }
    /* Now align the size up to a page boundary */
    alignage = mrs_size & (mrs_page - 1);
    if (alignage) {
        mrs_size += mrs_page - alignage;
    }
    trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa, mrs_size,
                                           mrs_host);

    if (dev->n_tmp_sections) {
        /* Since we already have at least one section, let's see if
         * this extends it; since we're scanning in order, we only
         * have to look at the last one, and the FlatView that calls
         * us shouldn't have overlaps.
         */
        MemoryRegionSection *prev_sec = dev->tmp_sections +
                                               (dev->n_tmp_sections - 1);
        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
        uint64_t prev_size = int128_get64(prev_sec->size);
        uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
        uint64_t prev_host_start =
                        (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
                        prev_sec->offset_within_region;
        uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);

        if (mrs_gpa <= (prev_gpa_end + 1)) {
            /* OK, looks like overlapping/intersecting - it's possible that
             * the rounding to page sizes has made them overlap, but they should
             * match up in the same RAMBlock if they do.
             */
            if (mrs_gpa < prev_gpa_start) {
                error_report("%s: Section rounded to %"PRIx64
                             " prior to previous %"PRIx64,
                             __func__, mrs_gpa, prev_gpa_start);
                /* A way to cleanly fail here would be better */
                return;
            }
            /* Offset from the start of the previous GPA to this GPA */
            size_t offset = mrs_gpa - prev_gpa_start;

            if (prev_host_start + offset == mrs_host &&
                section->mr == prev_sec->mr &&
                (!dev->vhost_ops->vhost_backend_can_merge ||
                 dev->vhost_ops->vhost_backend_can_merge(dev,
                    mrs_host, mrs_size,
                    prev_host_start, prev_size))) {
                uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
                need_add = false;
                prev_sec->offset_within_address_space =
                    MIN(prev_gpa_start, mrs_gpa);
                prev_sec->offset_within_region =
                    MIN(prev_host_start, mrs_host) -
                    (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
                prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
                                               mrs_host));
                trace_vhost_region_add_section_merge(section->mr->name,
                                        int128_get64(prev_sec->size),
                                        prev_sec->offset_within_address_space,
                                        prev_sec->offset_within_region);
            } else {
                /* adjoining regions are fine, but overlapping ones with
                 * different blocks/offsets shouldn't happen
                 */
                if (mrs_gpa != prev_gpa_end + 1) {
                    error_report("%s: Overlapping but not coherent sections "
                                 "at %"PRIx64,
                                 __func__, mrs_gpa);
                    return;
                }
            }
        }
    }

    if (need_add) {
        ++dev->n_tmp_sections;
        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                    dev->n_tmp_sections);
        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
        /* The flatview isn't stable and we don't use it, making it NULL
         * means we can memcmp the list.
         */
        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
        memory_region_ref(section->mr);
    }
}

/* Used for both add and nop callbacks */
static void vhost_region_addnop(MemoryListener *listener,
                                MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }
    vhost_region_add_section(dev, section);
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device IOTLB");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end));
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
        return -errno;
    }
    return 0;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_features failed");
    }
    return r < 0 ? -errno : 0;
}

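/* Enable or disable dirty logging on every virtqueue of the device,
 * rolling the already-updated rings back to the previous setting if
 * one of them fails.
 */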
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

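/* Called when migration dirty-page logging is globally started or
 * stopped: (de)allocate the dirty log and reprogram the backend if the
 * device is already running, otherwise just remember the setting.
 */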
static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#ifdef HOST_WORDS_BIGENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
        return 0;
    }

    VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
    if (errno == ENOTTY) {
        error_report("vhost does not support cross-endian");
        return -ENOSYS;
    }

    return -errno;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

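/* Resolve an IOTLB miss reported by the backend: translate the IOVA
 * through the device's DMA address space and push the resulting
 * userspace mapping back to the backend as an IOTLB update.
 */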
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    rcu_read_lock();

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            error_report("Failed to update device IOTLB");
            goto out;
        }
    }
out:
    rcu_read_unlock();

    return ret;
}

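/* Hand one virtqueue over to the vhost backend: program its size, base
 * index, endianness and ring addresses, map the rings, and wire up the
 * kick/call notifiers.
 */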
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                struct VirtIODevice *vdev,
                                struct vhost_virtqueue *vq,
                                unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Queue might not be ready for start */
        return 0;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_num failed");
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_base failed");
        return -errno;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return -errno;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a;
    vq->desc = vhost_memory_map(dev, a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

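/* Take one virtqueue back from the vhost backend: retrieve the last
 * avail index, restore legacy vring endianness if needed, and unmap the
 * rings.
 */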
static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                    struct VirtIODevice *vdev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;
    int a;

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Don't stop the virtqueue which might have not been started */
        return;
    }

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect it by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
        r = -errno;
        goto fail_call;
    }

    vq->dev = dev;

    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}

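/* Initialize a vhost device: bind the backend, negotiate features, set
 * up the virtqueues and memory listeners, and install a migration
 * blocker when dirty logging is not supported or the shared log cannot
 * be allocated.
 */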
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout)
{
    uint64_t features;
    int i, r, n_initialized_vqs = 0;
    Error *local_err = NULL;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_get_features failed");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_addnop,
        .region_nop = vhost_region_addnop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };

    hdev->iommu_listener = (MemoryListener) {
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, &local_err);
        if (local_err) {
            error_report_err(local_err);
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
        error_report("vhost backend memory slots limit is less"
                " than current number of present memory slots");
        r = -1;
        if (busyloop_timeout) {
            goto fail_busyloop;
        } else {
            goto fail;
        }
    }

    return 0;

fail_busyloop:
    while (--i >= 0) {
        vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
    }
fail:
    hdev->nvqs = n_initialized_vqs;
    vhost_dev_cleanup(hdev);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    if (hdev->mem) {
        /* those are only safe after successful init */
        memory_listener_unregister(&hdev->memory_listener);
        QLIST_REMOVE(hdev, entry);
    }
    if (hdev->migration_blocker) {
        migrate_del_blocker(hdev->migration_blocker);
        error_free(hdev->migration_blocker);
    }
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    if (hdev->vhost_ops) {
        hdev->vhost_ops->vhost_backend_cleanup(hdev);
    }
    assert(!hdev->log);

    memset(hdev, 0, sizeof(struct vhost_dev));
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r, e;

    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
    r = virtio_device_grab_ioeventfd(vdev);
    if (r < 0) {
        error_report("binding does not support host notifiers");
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         true);
        if (r < 0) {
            error_report("vhost VQ %d notifier binding failed: %d", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (e < 0) {
            error_report("vhost VQ %d notifier cleanup error: %d", i, -e);
        }
        assert (e >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
fail:
    return r;
}

/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
                                         false);
        if (r < 0) {
            error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
        }
        assert (r >= 0);
        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
    }
    virtio_device_release_ioeventfd(vdev);
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                         bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;
    struct vhost_vring_file file;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    if (mask) {
        assert(vdev->use_guest_notifier_mask);
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }

    file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
    r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_vring_call failed");
    }
}

uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (!(hdev->features & bit_mask)) {
            features &= ~bit_mask;
        }
        bit++;
    }
    return features;
}

void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features)
{
    const int *bit = feature_bits;
    while (*bit != VHOST_INVALID_FEATURE_BIT) {
        uint64_t bit_mask = (1ULL << *bit);
        if (features & bit_mask) {
            hdev->acked_features |= bit_mask;
        }
        bit++;
    }
}

int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_get_config) {
        return hdev->vhost_ops->vhost_get_config(hdev, config, config_len);
    }

    return -1;
}

int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags)
{
    assert(hdev->vhost_ops);

    if (hdev->vhost_ops->vhost_set_config) {
        return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
                                                 size, flags);
    }

    return -1;
}

void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
                                   const VhostDevConfigOps *ops)
{
    hdev->config_ops = ops;
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    hdev->started = true;
    hdev->vdev = vdev;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }

    if (vhost_dev_has_iommu(hdev)) {
        memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
    }

    r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG("vhost_set_mem_table failed");
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        uint64_t log_base;

        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = vhost_log_get(hdev->log_size,
                                  vhost_dev_log_is_shared(hdev));
        log_base = (uintptr_t)hdev->log->log;
        r = hdev->vhost_ops->vhost_set_log_base(hdev,
                                                hdev->log_size ? log_base : 0,
                                                hdev->log);
        if (r < 0) {
            VHOST_OPS_DEBUG("vhost_set_log_base failed");
            r = -errno;
            goto fail_log;
        }
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * vhost-kernel code requires this. */
        for (i = 0; i < hdev->nvqs; ++i) {
            struct vhost_virtqueue *vq = hdev->vqs + i;
            vhost_device_iotlb_miss(hdev, vq->used_phys, true);
        }
    }
    return 0;
fail_log:
    vhost_log_put(hdev, false);
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;

fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    /* should only be called after backend is connected */
    assert(hdev->vhost_ops);

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }

    if (vhost_dev_has_iommu(hdev)) {
        hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
        memory_listener_unregister(&hdev->iommu_listener);
    }
    vhost_log_put(hdev, true);
    hdev->started = false;
    hdev->vdev = NULL;
}

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file)
{
    if (hdev->vhost_ops->vhost_net_set_backend) {
        return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
    }

    return -1;
}