qemu/accel/kvm/kvm-all.c
   1/*
   2 * QEMU KVM support
   3 *
   4 * Copyright IBM, Corp. 2008
   5 *           Red Hat, Inc. 2008
   6 *
   7 * Authors:
   8 *  Anthony Liguori   <aliguori@us.ibm.com>
   9 *  Glauber Costa     <gcosta@redhat.com>
  10 *
  11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  12 * See the COPYING file in the top-level directory.
  13 *
  14 */
  15
  16#include "qemu/osdep.h"
  17#include <sys/ioctl.h>
  18
  19#include <linux/kvm.h>
  20
  21#include "qemu/atomic.h"
  22#include "qemu/option.h"
  23#include "qemu/config-file.h"
  24#include "qemu/error-report.h"
  25#include "qapi/error.h"
  26#include "hw/hw.h"
  27#include "hw/pci/msi.h"
  28#include "hw/pci/msix.h"
  29#include "hw/s390x/adapter.h"
  30#include "exec/gdbstub.h"
  31#include "sysemu/kvm_int.h"
  32#include "sysemu/cpus.h"
  33#include "qemu/bswap.h"
  34#include "exec/memory.h"
  35#include "exec/ram_addr.h"
  36#include "exec/address-spaces.h"
  37#include "qemu/event_notifier.h"
  38#include "trace.h"
  39#include "hw/irq.h"
  40#include "sysemu/sev.h"
  41#include "sysemu/balloon.h"
  42
  43#include "hw/boards.h"
  44
  45/* This check must be after config-host.h is included */
  46#ifdef CONFIG_EVENTFD
  47#include <sys/eventfd.h>
  48#endif
  49
  50/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
  51 * need to use the real host PAGE_SIZE, as that's what KVM will use.
  52 */
  53#define PAGE_SIZE getpagesize()
  54
  55//#define DEBUG_KVM
  56
  57#ifdef DEBUG_KVM
  58#define DPRINTF(fmt, ...) \
  59    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
  60#else
  61#define DPRINTF(fmt, ...) \
  62    do { } while (0)
  63#endif
  64
  65#define KVM_MSI_HASHTAB_SIZE    256
  66
  67struct KVMParkedVcpu {
  68    unsigned long vcpu_id;
  69    int kvm_fd;
  70    QLIST_ENTRY(KVMParkedVcpu) node;
  71};
  72
  73struct KVMState
  74{
  75    AccelState parent_obj;
  76
  77    int nr_slots;
  78    int fd;
  79    int vmfd;
  80    int coalesced_mmio;
  81    int coalesced_pio;
  82    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
  83    bool coalesced_flush_in_progress;
  84    int vcpu_events;
  85    int robust_singlestep;
  86    int debugregs;
  87#ifdef KVM_CAP_SET_GUEST_DEBUG
  88    QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
  89#endif
  90    int max_nested_state_len;
  91    int many_ioeventfds;
  92    int intx_set_mask;
  93    bool sync_mmu;
  94    bool manual_dirty_log_protect;
  95    /* The man page (and POSIX) say ioctl numbers are signed int, but
  96     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
  97     * unsigned, and treating them as signed here can break things */
  98    unsigned irq_set_ioctl;
  99    unsigned int sigmask_len;
 100    GHashTable *gsimap;
 101#ifdef KVM_CAP_IRQ_ROUTING
 102    struct kvm_irq_routing *irq_routes;
 103    int nr_allocated_irq_routes;
 104    unsigned long *used_gsi_bitmap;
 105    unsigned int gsi_count;
 106    QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
 107#endif
 108    KVMMemoryListener memory_listener;
 109    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
 110
 111    /* memory encryption */
 112    void *memcrypt_handle;
 113    int (*memcrypt_encrypt_data)(void *handle, uint8_t *ptr, uint64_t len);
 114
 115    /* For "info mtree -f" to tell if an MR is registered in KVM */
 116    int nr_as;
 117    struct KVMAs {
 118        KVMMemoryListener *ml;
 119        AddressSpace *as;
 120    } *as;
 121};
 122
 123KVMState *kvm_state;
 124bool kvm_kernel_irqchip;
 125bool kvm_split_irqchip;
 126bool kvm_async_interrupts_allowed;
 127bool kvm_halt_in_kernel_allowed;
 128bool kvm_eventfds_allowed;
 129bool kvm_irqfds_allowed;
 130bool kvm_resamplefds_allowed;
 131bool kvm_msi_via_irqfd_allowed;
 132bool kvm_gsi_routing_allowed;
 133bool kvm_gsi_direct_mapping;
 134bool kvm_allowed;
 135bool kvm_readonly_mem_allowed;
 136bool kvm_vm_attributes_allowed;
 137bool kvm_direct_msi_allowed;
 138bool kvm_ioeventfd_any_length_allowed;
 139bool kvm_msi_use_devid;
 140static bool kvm_immediate_exit;
 141
 142static const KVMCapabilityInfo kvm_required_capabilites[] = {
 143    KVM_CAP_INFO(USER_MEMORY),
 144    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
 145    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
 146    KVM_CAP_LAST_INFO
 147};
 148
 149#define kvm_slots_lock(kml)      qemu_mutex_lock(&(kml)->slots_lock)
 150#define kvm_slots_unlock(kml)    qemu_mutex_unlock(&(kml)->slots_lock)
 151
 152int kvm_get_max_memslots(void)
 153{
 154    KVMState *s = KVM_STATE(current_machine->accelerator);
 155
 156    return s->nr_slots;
 157}
 158
 159bool kvm_memcrypt_enabled(void)
 160{
 161    if (kvm_state && kvm_state->memcrypt_handle) {
 162        return true;
 163    }
 164
 165    return false;
 166}
 167
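/*
 * Encrypt guest memory in place through the memory-encryption hook
 * registered in kvm_state (e.g. by the SEV setup code).  Returns the
 * hook's result, or 1 when no hook is installed so that callers can
 * tell nothing was encrypted.
 */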
 168int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
 169{
 170    if (kvm_state->memcrypt_handle &&
 171        kvm_state->memcrypt_encrypt_data) {
 172        return kvm_state->memcrypt_encrypt_data(kvm_state->memcrypt_handle,
 173                                              ptr, len);
 174    }
 175
 176    return 1;
 177}
 178
 179/* Called with KVMMemoryListener.slots_lock held */
 180static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 181{
 182    KVMState *s = kvm_state;
 183    int i;
 184
 185    for (i = 0; i < s->nr_slots; i++) {
 186        if (kml->slots[i].memory_size == 0) {
 187            return &kml->slots[i];
 188        }
 189    }
 190
 191    return NULL;
 192}
 193
 194bool kvm_has_free_slot(MachineState *ms)
 195{
 196    KVMState *s = KVM_STATE(ms->accelerator);
 197    bool result;
 198    KVMMemoryListener *kml = &s->memory_listener;
 199
 200    kvm_slots_lock(kml);
 201    result = !!kvm_get_free_slot(kml);
 202    kvm_slots_unlock(kml);
 203
 204    return result;
 205}
 206
 207/* Called with KVMMemoryListener.slots_lock held */
 208static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
 209{
 210    KVMSlot *slot = kvm_get_free_slot(kml);
 211
 212    if (slot) {
 213        return slot;
 214    }
 215
 216    fprintf(stderr, "%s: no free slot available\n", __func__);
 217    abort();
 218}
 219
 220static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
 221                                         hwaddr start_addr,
 222                                         hwaddr size)
 223{
 224    KVMState *s = kvm_state;
 225    int i;
 226
 227    for (i = 0; i < s->nr_slots; i++) {
 228        KVMSlot *mem = &kml->slots[i];
 229
 230        if (start_addr == mem->start_addr && size == mem->memory_size) {
 231            return mem;
 232        }
 233    }
 234
 235    return NULL;
 236}
 237
 238/*
 239 * Calculate and align the start address and the size of the section.
 240 * Return the size. If the size is 0, the aligned section is empty.
 241 */
 242static hwaddr kvm_align_section(MemoryRegionSection *section,
 243                                hwaddr *start)
 244{
 245    hwaddr size = int128_get64(section->size);
 246    hwaddr delta, aligned;
 247
 248    /* KVM works in page-size chunks, but this function may be called
 249       with a sub-page size and an unaligned start address. Round the start
 250       address up to the next page boundary and truncate the size down. */
 251    aligned = ROUND_UP(section->offset_within_address_space,
 252                       qemu_real_host_page_size);
 253    delta = aligned - section->offset_within_address_space;
 254    *start = aligned;
 255    if (delta > size) {
 256        return 0;
 257    }
 258
 259    return (size - delta) & qemu_real_host_page_mask;
 260}
 261
 262int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
 263                                       hwaddr *phys_addr)
 264{
 265    KVMMemoryListener *kml = &s->memory_listener;
 266    int i, ret = 0;
 267
 268    kvm_slots_lock(kml);
 269    for (i = 0; i < s->nr_slots; i++) {
 270        KVMSlot *mem = &kml->slots[i];
 271
 272        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
 273            *phys_addr = mem->start_addr + (ram - mem->ram);
 274            ret = 1;
 275            break;
 276        }
 277    }
 278    kvm_slots_unlock(kml);
 279
 280    return ret;
 281}
 282
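/*
 * Push one KVMSlot to the kernel with KVM_SET_USER_MEMORY_REGION.  The
 * kernel slot id encodes the KVM address-space id in the upper 16 bits
 * and the slot index in the lower 16 bits.  Toggling KVM_MEM_READONLY on
 * an existing slot requires deleting it first (size 0), see below.
 */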
 283static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
 284{
 285    KVMState *s = kvm_state;
 286    struct kvm_userspace_memory_region mem;
 287    int ret;
 288
 289    mem.slot = slot->slot | (kml->as_id << 16);
 290    mem.guest_phys_addr = slot->start_addr;
 291    mem.userspace_addr = (unsigned long)slot->ram;
 292    mem.flags = slot->flags;
 293
 294    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
 295        /* Set the slot size to 0 before setting the slot to the desired
 296         * value. This is needed based on KVM commit 75d61fbc. */
 297        mem.memory_size = 0;
 298        kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 299    }
 300    mem.memory_size = slot->memory_size;
 301    ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 302    slot->old_flags = mem.flags;
 303    trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
 304                              mem.memory_size, mem.userspace_addr, ret);
 305    return ret;
 306}
 307
 308int kvm_destroy_vcpu(CPUState *cpu)
 309{
 310    KVMState *s = kvm_state;
 311    long mmap_size;
 312    struct KVMParkedVcpu *vcpu = NULL;
 313    int ret = 0;
 314
 315    DPRINTF("kvm_destroy_vcpu\n");
 316
 317    ret = kvm_arch_destroy_vcpu(cpu);
 318    if (ret < 0) {
 319        goto err;
 320    }
 321
 322    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 323    if (mmap_size < 0) {
 324        ret = mmap_size;
 325        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
 326        goto err;
 327    }
 328
 329    ret = munmap(cpu->kvm_run, mmap_size);
 330    if (ret < 0) {
 331        goto err;
 332    }
 333
 334    vcpu = g_malloc0(sizeof(*vcpu));
 335    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
 336    vcpu->kvm_fd = cpu->kvm_fd;
 337    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
 338err:
 339    return ret;
 340}
 341
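/*
 * Return a vcpu fd for the given vcpu id: reuse a previously parked fd
 * from kvm_destroy_vcpu() if one exists, otherwise ask the kernel for a
 * new one with KVM_CREATE_VCPU.
 */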
 342static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
 343{
 344    struct KVMParkedVcpu *cpu;
 345
 346    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
 347        if (cpu->vcpu_id == vcpu_id) {
 348            int kvm_fd;
 349
 350            QLIST_REMOVE(cpu, node);
 351            kvm_fd = cpu->kvm_fd;
 352            g_free(cpu);
 353            return kvm_fd;
 354        }
 355    }
 356
 357    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
 358}
 359
 360int kvm_init_vcpu(CPUState *cpu)
 361{
 362    KVMState *s = kvm_state;
 363    long mmap_size;
 364    int ret;
 365
 366    DPRINTF("kvm_init_vcpu\n");
 367
 368    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
 369    if (ret < 0) {
 370        DPRINTF("kvm_create_vcpu failed\n");
 371        goto err;
 372    }
 373
 374    cpu->kvm_fd = ret;
 375    cpu->kvm_state = s;
 376    cpu->vcpu_dirty = true;
 377
 378    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 379    if (mmap_size < 0) {
 380        ret = mmap_size;
 381        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
 382        goto err;
 383    }
 384
 385    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 386                        cpu->kvm_fd, 0);
 387    if (cpu->kvm_run == MAP_FAILED) {
 388        ret = -errno;
 389        DPRINTF("mmap'ing vcpu state failed\n");
 390        goto err;
 391    }
 392
 393    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
 394        s->coalesced_mmio_ring =
 395            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
 396    }
 397
 398    ret = kvm_arch_init_vcpu(cpu);
 399err:
 400    return ret;
 401}
 402
 403/*
 404 * dirty pages logging control
 405 */
 406
 407static int kvm_mem_flags(MemoryRegion *mr)
 408{
 409    bool readonly = mr->readonly || memory_region_is_romd(mr);
 410    int flags = 0;
 411
 412    if (memory_region_get_dirty_log_mask(mr) != 0) {
 413        flags |= KVM_MEM_LOG_DIRTY_PAGES;
 414    }
 415    if (readonly && kvm_readonly_mem_allowed) {
 416        flags |= KVM_MEM_READONLY;
 417    }
 418    return flags;
 419}
 420
 421/* Called with KVMMemoryListener.slots_lock held */
 422static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
 423                                 MemoryRegion *mr)
 424{
 425    mem->flags = kvm_mem_flags(mr);
 426
 427    /* If nothing changed effectively, no need to issue ioctl */
 428    if (mem->flags == mem->old_flags) {
 429        return 0;
 430    }
 431
 432    return kvm_set_user_memory_region(kml, mem, false);
 433}
 434
 435static int kvm_section_update_flags(KVMMemoryListener *kml,
 436                                    MemoryRegionSection *section)
 437{
 438    hwaddr start_addr, size;
 439    KVMSlot *mem;
 440    int ret = 0;
 441
 442    size = kvm_align_section(section, &start_addr);
 443    if (!size) {
 444        return 0;
 445    }
 446
 447    kvm_slots_lock(kml);
 448
 449    mem = kvm_lookup_matching_slot(kml, start_addr, size);
 450    if (!mem) {
 451        /* We don't have a slot if we want to trap every access. */
 452        goto out;
 453    }
 454
 455    ret = kvm_slot_update_flags(kml, mem, section->mr);
 456
 457out:
 458    kvm_slots_unlock(kml);
 459    return ret;
 460}
 461
 462static void kvm_log_start(MemoryListener *listener,
 463                          MemoryRegionSection *section,
 464                          int old, int new)
 465{
 466    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 467    int r;
 468
 469    if (old != 0) {
 470        return;
 471    }
 472
 473    r = kvm_section_update_flags(kml, section);
 474    if (r < 0) {
 475        abort();
 476    }
 477}
 478
 479static void kvm_log_stop(MemoryListener *listener,
 480                          MemoryRegionSection *section,
 481                          int old, int new)
 482{
 483    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 484    int r;
 485
 486    if (new != 0) {
 487        return;
 488    }
 489
 490    r = kvm_section_update_flags(kml, section);
 491    if (r < 0) {
 492        abort();
 493    }
 494}
 495
 496/* get kvm's dirty pages bitmap and update qemu's */
 497static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
 498                                         unsigned long *bitmap)
 499{
 500    ram_addr_t start = section->offset_within_region +
 501                       memory_region_get_ram_addr(section->mr);
 502    ram_addr_t pages = int128_get64(section->size) / getpagesize();
 503
 504    cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
 505    return 0;
 506}
 507
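/* ALIGN() rounds x up to the next multiple of y (y must be a power of
 * two), e.g. ALIGN(5, 64) == 64 and ALIGN(64, 64) == 64. */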
 508#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
 509
 510/**
 511 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 512 *
 513 * This function first fetches the dirty bitmap from the kernel and then
 514 * updates QEMU's dirty bitmap.
 515 *
 516 * NOTE: the caller must hold kml->slots_lock.
 517 *
 518 * @kml: the KVM memory listener object
 519 * @section: the memory section to sync the dirty bitmap with
 520 */
 521static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
 522                                          MemoryRegionSection *section)
 523{
 524    KVMState *s = kvm_state;
 525    struct kvm_dirty_log d = {};
 526    KVMSlot *mem;
 527    hwaddr start_addr, size;
 528    int ret = 0;
 529
 530    size = kvm_align_section(section, &start_addr);
 531    if (size) {
 532        mem = kvm_lookup_matching_slot(kml, start_addr, size);
 533        if (!mem) {
 534            /* We don't have a slot if we want to trap every access. */
 535            goto out;
 536        }
 537
 538        /* XXX bad kernel interface alert
 539         * For the dirty bitmap, the kernel allocates an array whose size is
 540         * aligned to bits-per-long.  But when the kernel is 64-bit and
 541         * userspace is 32-bit, userspace cannot align to the same
 542         * bits-per-long, since sizeof(long) differs between kernel and
 543         * user space.  Userspace would then provide a buffer that may be
 544         * 4 bytes smaller than what the kernel uses, resulting in
 545         * userspace memory corruption (which valgrind usually cannot
 546         * detect either).
 547         * So for now, align to 64 instead of HOST_LONG_BITS here, in the
 548         * hope that sizeof(long) won't become > 8 any time soon.
 549         */
 550        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
 551                     /*HOST_LONG_BITS*/ 64) / 8;
 552        if (!mem->dirty_bmap) {
 553            /* Allocate on the first log_sync, once and for all */
 554            mem->dirty_bmap = g_malloc0(size);
 555        }
 556
 557        d.dirty_bitmap = mem->dirty_bmap;
 558        d.slot = mem->slot | (kml->as_id << 16);
 559        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
 560            DPRINTF("ioctl failed %d\n", errno);
 561            ret = -1;
 562            goto out;
 563        }
 564
 565        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
 566    }
 567out:
 568    return ret;
 569}
 570
 571/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
 572#define KVM_CLEAR_LOG_SHIFT  6
 573#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
 574#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
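
/*
 * Worked example of the alignment below, assuming 4 KiB host pages:
 * KVM_CLEAR_LOG_ALIGN is 4 KiB << 6 = 256 KiB.  For a section starting
 * 0x46000 bytes into its memslot, bmap_start is rounded down to 0x40000
 * (page 64 of the slot) and start_delta becomes 0x6000 (6 pages), which
 * is the hole that has to be masked out of the temporary bitmap.
 */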
 575
 576/**
 577 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 578 *
 579 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 580 * protection in the host kernel because in that case this operation
 581 * will be done within log_sync().
 582 *
 583 * @kml:     the kvm memory listener
 584 * @section: the memory range to clear dirty bitmap
 585 */
 586static int kvm_physical_log_clear(KVMMemoryListener *kml,
 587                                  MemoryRegionSection *section)
 588{
 589    KVMState *s = kvm_state;
 590    struct kvm_clear_dirty_log d;
 591    uint64_t start, end, bmap_start, start_delta, bmap_npages, size;
 592    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
 593    KVMSlot *mem = NULL;
 594    int ret, i;
 595
 596    if (!s->manual_dirty_log_protect) {
 597        /* No need to do explicit clear */
 598        return 0;
 599    }
 600
 601    start = section->offset_within_address_space;
 602    size = int128_get64(section->size);
 603
 604    if (!size) {
 605        /* Nothing more we can do... */
 606        return 0;
 607    }
 608
 609    kvm_slots_lock(kml);
 610
 611    /* Find any possible slot that covers the section */
 612    for (i = 0; i < s->nr_slots; i++) {
 613        mem = &kml->slots[i];
 614        if (mem->start_addr <= start &&
 615            start + size <= mem->start_addr + mem->memory_size) {
 616            break;
 617        }
 618    }
 619
 620    /*
 621     * We should always find exactly one memslot at this point; otherwise
 622     * something is wrong in the upper layers.
 623     */
 624    assert(mem && i != s->nr_slots);
 625
 626    /*
 627     * We need to extend either the start or the size or both to
 628     * satisfy the KVM interface requirement.  First, align the start
 629     * address down to a 64-host-page boundary.
 630     */
 631    bmap_start = (start - mem->start_addr) & KVM_CLEAR_LOG_MASK;
 632    start_delta = start - mem->start_addr - bmap_start;
 633    bmap_start /= psize;
 634
 635    /*
 636     * The kernel interface has restriction on the size too, that either:
 637     *
 638     * (1) the size is 64 host pages aligned (just like the start), or
 639     * (2) the size fills up until the end of the KVM memslot.
 640     */
 641    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
 642        << KVM_CLEAR_LOG_SHIFT;
 643    end = mem->memory_size / psize;
 644    if (bmap_npages > end - bmap_start) {
 645        bmap_npages = end - bmap_start;
 646    }
 647    start_delta /= psize;
 648
 649    /*
 650     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
 651     * that we won't clear any unknown dirty bits; otherwise we might
 652     * accidentally clear set bits that have not yet been synced from
 653     * the kernel into QEMU's bitmap, and we would lose track of the
 654     * guest modifications to those pages (which can directly lead
 655     * to guest data loss or a panic after migration).
 656     *
 657     * Layout of the KVMSlot.dirty_bmap:
 658     *
 659     *                   |<-------- bmap_npages -----------..>|
 660     *                                                     [1]
 661     *                     start_delta         size
 662     *  |----------------|-------------|------------------|------------|
 663     *  ^                ^             ^                               ^
 664     *  |                |             |                               |
 665     * start          bmap_start     (start)                         end
 666     * of memslot                                             of memslot
 667     *
 668     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
 669     */
 670
 671    assert(bmap_start % BITS_PER_LONG == 0);
 672    /* We should never do log_clear before log_sync */
 673    assert(mem->dirty_bmap);
 674    if (start_delta) {
 675        /* Slow path - we need to manipulate a temp bitmap */
 676        bmap_clear = bitmap_new(bmap_npages);
 677        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
 678                                    bmap_start, start_delta + size / psize);
 679        /*
 680         * Zero the bits for the leading pages because the caller did not
 681         * ask to clear them; we only extended the range downwards to
 682         * satisfy the 64-page alignment requirement.
 683         */
 684        bitmap_clear(bmap_clear, 0, start_delta);
 685        d.dirty_bitmap = bmap_clear;
 686    } else {
 687        /* Fast path - start address aligns well with BITS_PER_LONG */
 688        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
 689    }
 690
 691    d.first_page = bmap_start;
 692    /* It should never overflow.  If it happens, say something */
 693    assert(bmap_npages <= UINT32_MAX);
 694    d.num_pages = bmap_npages;
 695    d.slot = mem->slot | (kml->as_id << 16);
 696
 697    if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
 698        ret = -errno;
 699        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
 700                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
 701                     __func__, d.slot, (uint64_t)d.first_page,
 702                     (uint32_t)d.num_pages, ret);
 703    } else {
 704        ret = 0;
 705        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
 706    }
 707
 708    /*
 709     * After we have updated the remote dirty bitmap, also update the
 710     * memslot's cached bitmap, so that if another user clears the same
 711     * region we know not to clear it again on the remote side, which
 712     * would likewise lose data.
 713     */
 714    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
 715                 size / psize);
 716    /* This handles the NULL case well */
 717    g_free(bmap_clear);
 718
 719    kvm_slots_unlock(kml);
 720
 721    return ret;
 722}
 723
 724static void kvm_coalesce_mmio_region(MemoryListener *listener,
 725                                     MemoryRegionSection *section,
 726                                     hwaddr start, hwaddr size)
 727{
 728    KVMState *s = kvm_state;
 729
 730    if (s->coalesced_mmio) {
 731        struct kvm_coalesced_mmio_zone zone;
 732
 733        zone.addr = start;
 734        zone.size = size;
 735        zone.pad = 0;
 736
 737        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
 738    }
 739}
 740
 741static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
 742                                       MemoryRegionSection *section,
 743                                       hwaddr start, hwaddr size)
 744{
 745    KVMState *s = kvm_state;
 746
 747    if (s->coalesced_mmio) {
 748        struct kvm_coalesced_mmio_zone zone;
 749
 750        zone.addr = start;
 751        zone.size = size;
 752        zone.pad = 0;
 753
 754        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 755    }
 756}
 757
 758static void kvm_coalesce_pio_add(MemoryListener *listener,
 759                                MemoryRegionSection *section,
 760                                hwaddr start, hwaddr size)
 761{
 762    KVMState *s = kvm_state;
 763
 764    if (s->coalesced_pio) {
 765        struct kvm_coalesced_mmio_zone zone;
 766
 767        zone.addr = start;
 768        zone.size = size;
 769        zone.pio = 1;
 770
 771        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
 772    }
 773}
 774
 775static void kvm_coalesce_pio_del(MemoryListener *listener,
 776                                MemoryRegionSection *section,
 777                                hwaddr start, hwaddr size)
 778{
 779    KVMState *s = kvm_state;
 780
 781    if (s->coalesced_pio) {
 782        struct kvm_coalesced_mmio_zone zone;
 783
 784        zone.addr = start;
 785        zone.size = size;
 786        zone.pio = 1;
 787
 788        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 789    }
 790}
 791
 792static MemoryListener kvm_coalesced_pio_listener = {
 793    .coalesced_io_add = kvm_coalesce_pio_add,
 794    .coalesced_io_del = kvm_coalesce_pio_del,
 795};
 796
 797int kvm_check_extension(KVMState *s, unsigned int extension)
 798{
 799    int ret;
 800
 801    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
 802    if (ret < 0) {
 803        ret = 0;
 804    }
 805
 806    return ret;
 807}
 808
 809int kvm_vm_check_extension(KVMState *s, unsigned int extension)
 810{
 811    int ret;
 812
 813    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
 814    if (ret < 0) {
 815        /* VM wide version not implemented, use global one instead */
 816        ret = kvm_check_extension(s, extension);
 817    }
 818
 819    return ret;
 820}
 821
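/*
 * Example, assuming a big-endian target on a little-endian host: a
 * 2-byte datamatch value of 0x1234 from the memory core is byte-swapped
 * to 0x3412 below so that the kernel compares it in host byte order.
 */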
 822static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
 823{
 824#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
 825    /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
 826     * endianness, but the memory core hands them in target endianness.
 827     * For example, PPC is always treated as big-endian even when
 828     * running under KVM on a little-endian host (PPC64LE).  Correct here.
 829     */
 830    switch (size) {
 831    case 2:
 832        val = bswap16(val);
 833        break;
 834    case 4:
 835        val = bswap32(val);
 836        break;
 837    }
 838#endif
 839    return val;
 840}
 841
 842static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
 843                                  bool assign, uint32_t size, bool datamatch)
 844{
 845    int ret;
 846    struct kvm_ioeventfd iofd = {
 847        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
 848        .addr = addr,
 849        .len = size,
 850        .flags = 0,
 851        .fd = fd,
 852    };
 853
 854    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
 855                                 datamatch);
 856    if (!kvm_enabled()) {
 857        return -ENOSYS;
 858    }
 859
 860    if (datamatch) {
 861        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
 862    }
 863    if (!assign) {
 864        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
 865    }
 866
 867    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
 868
 869    if (ret < 0) {
 870        return -errno;
 871    }
 872
 873    return 0;
 874}
 875
 876static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
 877                                 bool assign, uint32_t size, bool datamatch)
 878{
 879    struct kvm_ioeventfd kick = {
 880        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
 881        .addr = addr,
 882        .flags = KVM_IOEVENTFD_FLAG_PIO,
 883        .len = size,
 884        .fd = fd,
 885    };
 886    int r;
 887    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
 888    if (!kvm_enabled()) {
 889        return -ENOSYS;
 890    }
 891    if (datamatch) {
 892        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
 893    }
 894    if (!assign) {
 895        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
 896    }
 897    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
 898    if (r < 0) {
 899        return r;
 900    }
 901    return 0;
 902}
 903
 904
 905static int kvm_check_many_ioeventfds(void)
 906{
 907    /* Userspace can use ioeventfd for io notification.  This requires a host
 908     * that supports eventfd(2) and an I/O thread; since eventfd does not
 909     * support SIGIO it cannot interrupt the vcpu.
 910     *
 911     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
 912     * can avoid creating too many ioeventfds.
 913     */
 914#if defined(CONFIG_EVENTFD)
 915    int ioeventfds[7];
 916    int i, ret = 0;
 917    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
 918        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
 919        if (ioeventfds[i] < 0) {
 920            break;
 921        }
 922        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
 923        if (ret < 0) {
 924            close(ioeventfds[i]);
 925            break;
 926        }
 927    }
 928
 929    /* Decide whether many devices are supported or not */
 930    ret = i == ARRAY_SIZE(ioeventfds);
 931
 932    while (i-- > 0) {
 933        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
 934        close(ioeventfds[i]);
 935    }
 936    return ret;
 937#else
 938    return 0;
 939#endif
 940}
 941
 942static const KVMCapabilityInfo *
 943kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
 944{
 945    while (list->name) {
 946        if (!kvm_check_extension(s, list->value)) {
 947            return list;
 948        }
 949        list++;
 950    }
 951    return NULL;
 952}
 953
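/*
 * Create or delete the KVM memslot backing a MemoryRegionSection.  On
 * removal the dirty bitmap is synced first (if dirty logging was on) and
 * the slot is then dropped by setting its size to 0.
 */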
 954static void kvm_set_phys_mem(KVMMemoryListener *kml,
 955                             MemoryRegionSection *section, bool add)
 956{
 957    KVMSlot *mem;
 958    int err;
 959    MemoryRegion *mr = section->mr;
 960    bool writeable = !mr->readonly && !mr->rom_device;
 961    hwaddr start_addr, size;
 962    void *ram;
 963
 964    if (!memory_region_is_ram(mr)) {
 965        if (writeable || !kvm_readonly_mem_allowed) {
 966            return;
 967        } else if (!mr->romd_mode) {
 968            /* If the memory device is not in romd_mode, then we actually want
 969             * to remove the kvm memory slot so all accesses will trap. */
 970            add = false;
 971        }
 972    }
 973
 974    size = kvm_align_section(section, &start_addr);
 975    if (!size) {
 976        return;
 977    }
 978
 979    /* use aligned delta to align the ram address */
 980    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
 981          (start_addr - section->offset_within_address_space);
 982
 983    kvm_slots_lock(kml);
 984
 985    if (!add) {
 986        mem = kvm_lookup_matching_slot(kml, start_addr, size);
 987        if (!mem) {
 988            goto out;
 989        }
 990        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
 991            kvm_physical_sync_dirty_bitmap(kml, section);
 992        }
 993
 994        /* unregister the slot */
 995        g_free(mem->dirty_bmap);
 996        mem->dirty_bmap = NULL;
 997        mem->memory_size = 0;
 998        mem->flags = 0;
 999        err = kvm_set_user_memory_region(kml, mem, false);
1000        if (err) {
1001            fprintf(stderr, "%s: error unregistering slot: %s\n",
1002                    __func__, strerror(-err));
1003            abort();
1004        }
1005        goto out;
1006    }
1007
1008    /* register the new slot */
1009    mem = kvm_alloc_slot(kml);
1010    mem->memory_size = size;
1011    mem->start_addr = start_addr;
1012    mem->ram = ram;
1013    mem->flags = kvm_mem_flags(mr);
1014
1015    err = kvm_set_user_memory_region(kml, mem, true);
1016    if (err) {
1017        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
1018                strerror(-err));
1019        abort();
1020    }
1021
1022out:
1023    kvm_slots_unlock(kml);
1024}
1025
1026static void kvm_region_add(MemoryListener *listener,
1027                           MemoryRegionSection *section)
1028{
1029    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1030
1031    memory_region_ref(section->mr);
1032    kvm_set_phys_mem(kml, section, true);
1033}
1034
1035static void kvm_region_del(MemoryListener *listener,
1036                           MemoryRegionSection *section)
1037{
1038    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1039
1040    kvm_set_phys_mem(kml, section, false);
1041    memory_region_unref(section->mr);
1042}
1043
1044static void kvm_log_sync(MemoryListener *listener,
1045                         MemoryRegionSection *section)
1046{
1047    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1048    int r;
1049
1050    kvm_slots_lock(kml);
1051    r = kvm_physical_sync_dirty_bitmap(kml, section);
1052    kvm_slots_unlock(kml);
1053    if (r < 0) {
1054        abort();
1055    }
1056}
1057
1058static void kvm_log_clear(MemoryListener *listener,
1059                          MemoryRegionSection *section)
1060{
1061    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1062    int r;
1063
1064    r = kvm_physical_log_clear(kml, section);
1065    if (r < 0) {
1066        error_report_once("%s: kvm log clear failed: mr=%s "
1067                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1068                          section->mr->name, section->offset_within_region,
1069                          int128_get64(section->size));
1070        abort();
1071    }
1072}
1073
1074static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1075                                  MemoryRegionSection *section,
1076                                  bool match_data, uint64_t data,
1077                                  EventNotifier *e)
1078{
1079    int fd = event_notifier_get_fd(e);
1080    int r;
1081
1082    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1083                               data, true, int128_get64(section->size),
1084                               match_data);
1085    if (r < 0) {
1086        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1087                __func__, strerror(-r), -r);
1088        abort();
1089    }
1090}
1091
1092static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1093                                  MemoryRegionSection *section,
1094                                  bool match_data, uint64_t data,
1095                                  EventNotifier *e)
1096{
1097    int fd = event_notifier_get_fd(e);
1098    int r;
1099
1100    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1101                               data, false, int128_get64(section->size),
1102                               match_data);
1103    if (r < 0) {
1104        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1105                __func__, strerror(-r), -r);
1106        abort();
1107    }
1108}
1109
1110static void kvm_io_ioeventfd_add(MemoryListener *listener,
1111                                 MemoryRegionSection *section,
1112                                 bool match_data, uint64_t data,
1113                                 EventNotifier *e)
1114{
1115    int fd = event_notifier_get_fd(e);
1116    int r;
1117
1118    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1119                              data, true, int128_get64(section->size),
1120                              match_data);
1121    if (r < 0) {
1122        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1123                __func__, strerror(-r), -r);
1124        abort();
1125    }
1126}
1127
1128static void kvm_io_ioeventfd_del(MemoryListener *listener,
1129                                 MemoryRegionSection *section,
1130                                 bool match_data, uint64_t data,
1131                                 EventNotifier *e)
1132
1133{
1134    int fd = event_notifier_get_fd(e);
1135    int r;
1136
1137    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1138                              data, false, int128_get64(section->size),
1139                              match_data);
1140    if (r < 0) {
1141        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1142                __func__, strerror(-r), -r);
1143        abort();
1144    }
1145}
1146
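/*
 * Wire a KVMMemoryListener up to an AddressSpace: allocate the per-slot
 * array, install the region/log callbacks and remember the address space
 * in the KVMState.as table used by "info mtree -f".
 */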
1147void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1148                                  AddressSpace *as, int as_id)
1149{
1150    int i;
1151
1152    qemu_mutex_init(&kml->slots_lock);
1153    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
1154    kml->as_id = as_id;
1155
1156    for (i = 0; i < s->nr_slots; i++) {
1157        kml->slots[i].slot = i;
1158    }
1159
1160    kml->listener.region_add = kvm_region_add;
1161    kml->listener.region_del = kvm_region_del;
1162    kml->listener.log_start = kvm_log_start;
1163    kml->listener.log_stop = kvm_log_stop;
1164    kml->listener.log_sync = kvm_log_sync;
1165    kml->listener.log_clear = kvm_log_clear;
1166    kml->listener.priority = 10;
1167
1168    memory_listener_register(&kml->listener, as);
1169
1170    for (i = 0; i < s->nr_as; ++i) {
1171        if (!s->as[i].as) {
1172            s->as[i].as = as;
1173            s->as[i].ml = kml;
1174            break;
1175        }
1176    }
1177}
1178
1179static MemoryListener kvm_io_listener = {
1180    .eventfd_add = kvm_io_ioeventfd_add,
1181    .eventfd_del = kvm_io_ioeventfd_del,
1182    .priority = 10,
1183};
1184
1185int kvm_set_irq(KVMState *s, int irq, int level)
1186{
1187    struct kvm_irq_level event;
1188    int ret;
1189
1190    assert(kvm_async_interrupts_enabled());
1191
1192    event.level = level;
1193    event.irq = irq;
1194    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1195    if (ret < 0) {
1196        perror("kvm_set_irq");
1197        abort();
1198    }
1199
1200    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1201}
1202
1203#ifdef KVM_CAP_IRQ_ROUTING
1204typedef struct KVMMSIRoute {
1205    struct kvm_irq_routing_entry kroute;
1206    QTAILQ_ENTRY(KVMMSIRoute) entry;
1207} KVMMSIRoute;
1208
1209static void set_gsi(KVMState *s, unsigned int gsi)
1210{
1211    set_bit(gsi, s->used_gsi_bitmap);
1212}
1213
1214static void clear_gsi(KVMState *s, unsigned int gsi)
1215{
1216    clear_bit(gsi, s->used_gsi_bitmap);
1217}
1218
1219void kvm_init_irq_routing(KVMState *s)
1220{
1221    int gsi_count, i;
1222
1223    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1224    if (gsi_count > 0) {
1225        /* Round up so we can search ints using ffs */
1226        s->used_gsi_bitmap = bitmap_new(gsi_count);
1227        s->gsi_count = gsi_count;
1228    }
1229
1230    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1231    s->nr_allocated_irq_routes = 0;
1232
1233    if (!kvm_direct_msi_allowed) {
1234        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
1235            QTAILQ_INIT(&s->msi_hashtab[i]);
1236        }
1237    }
1238
1239    kvm_arch_init_irq_routing(s);
1240}
1241
1242void kvm_irqchip_commit_routes(KVMState *s)
1243{
1244    int ret;
1245
1246    if (kvm_gsi_direct_mapping()) {
1247        return;
1248    }
1249
1250    if (!kvm_gsi_routing_enabled()) {
1251        return;
1252    }
1253
1254    s->irq_routes->flags = 0;
1255    trace_kvm_irqchip_commit_routes();
1256    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1257    assert(ret == 0);
1258}
1259
1260static void kvm_add_routing_entry(KVMState *s,
1261                                  struct kvm_irq_routing_entry *entry)
1262{
1263    struct kvm_irq_routing_entry *new;
1264    int n, size;
1265
1266    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1267        n = s->nr_allocated_irq_routes * 2;
1268        if (n < 64) {
1269            n = 64;
1270        }
1271        size = sizeof(struct kvm_irq_routing);
1272        size += n * sizeof(*new);
1273        s->irq_routes = g_realloc(s->irq_routes, size);
1274        s->nr_allocated_irq_routes = n;
1275    }
1276    n = s->irq_routes->nr++;
1277    new = &s->irq_routes->entries[n];
1278
1279    *new = *entry;
1280
1281    set_gsi(s, entry->gsi);
1282}
1283
1284static int kvm_update_routing_entry(KVMState *s,
1285                                    struct kvm_irq_routing_entry *new_entry)
1286{
1287    struct kvm_irq_routing_entry *entry;
1288    int n;
1289
1290    for (n = 0; n < s->irq_routes->nr; n++) {
1291        entry = &s->irq_routes->entries[n];
1292        if (entry->gsi != new_entry->gsi) {
1293            continue;
1294        }
1295
1296        if (!memcmp(entry, new_entry, sizeof *entry)) {
1297            return 0;
1298        }
1299
1300        *entry = *new_entry;
1301
1302        return 0;
1303    }
1304
1305    return -ESRCH;
1306}
1307
1308void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1309{
1310    struct kvm_irq_routing_entry e = {};
1311
1312    assert(pin < s->gsi_count);
1313
1314    e.gsi = irq;
1315    e.type = KVM_IRQ_ROUTING_IRQCHIP;
1316    e.flags = 0;
1317    e.u.irqchip.irqchip = irqchip;
1318    e.u.irqchip.pin = pin;
1319    kvm_add_routing_entry(s, &e);
1320}
1321
1322void kvm_irqchip_release_virq(KVMState *s, int virq)
1323{
1324    struct kvm_irq_routing_entry *e;
1325    int i;
1326
1327    if (kvm_gsi_direct_mapping()) {
1328        return;
1329    }
1330
1331    for (i = 0; i < s->irq_routes->nr; i++) {
1332        e = &s->irq_routes->entries[i];
1333        if (e->gsi == virq) {
1334            s->irq_routes->nr--;
1335            *e = s->irq_routes->entries[s->irq_routes->nr];
1336        }
1337    }
1338    clear_gsi(s, virq);
1339    kvm_arch_release_virq_post(virq);
1340    trace_kvm_irqchip_release_virq(virq);
1341}
1342
1343static unsigned int kvm_hash_msi(uint32_t data)
1344{
1345    /* This is optimized for IA32 MSI layout. However, no other arch shall
1346     * repeat the mistake of not providing a direct MSI injection API. */
1347    return data & 0xff;
1348}
1349
1350static void kvm_flush_dynamic_msi_routes(KVMState *s)
1351{
1352    KVMMSIRoute *route, *next;
1353    unsigned int hash;
1354
1355    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1356        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1357            kvm_irqchip_release_virq(s, route->kroute.gsi);
1358            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1359            g_free(route);
1360        }
1361    }
1362}
1363
1364static int kvm_irqchip_get_virq(KVMState *s)
1365{
1366    int next_virq;
1367
1368    /*
1369     * PIC and IOAPIC share the first 16 GSI numbers, so there are more
1370     * available GSI numbers than IRQ routes. Allocating a GSI number can
1371     * therefore succeed even though a new route entry cannot be added.
1372     * When this happens, flush dynamic MSI entries to free IRQ route entries.
1373     */
1374    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
1375        kvm_flush_dynamic_msi_routes(s);
1376    }
1377
1378    /* Return the lowest unused GSI in the bitmap */
1379    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
1380    if (next_virq >= s->gsi_count) {
1381        return -ENOSPC;
1382    } else {
1383        return next_virq;
1384    }
1385}
1386
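/*
 * Look up a previously allocated MSI route in the hash table keyed by
 * the low 8 bits of the MSI data (see kvm_hash_msi()).
 */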
1387static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1388{
1389    unsigned int hash = kvm_hash_msi(msg.data);
1390    KVMMSIRoute *route;
1391
1392    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1393        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1394            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1395            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1396            return route;
1397        }
1398    }
1399    return NULL;
1400}
1401
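/*
 * Inject an MSI.  When direct MSI injection is available
 * (kvm_direct_msi_allowed) the message is delivered with KVM_SIGNAL_MSI;
 * otherwise a cached dynamic routing entry is allocated for the message
 * and its GSI is raised instead.
 */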
1402int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1403{
1404    struct kvm_msi msi;
1405    KVMMSIRoute *route;
1406
1407    if (kvm_direct_msi_allowed) {
1408        msi.address_lo = (uint32_t)msg.address;
1409        msi.address_hi = msg.address >> 32;
1410        msi.data = le32_to_cpu(msg.data);
1411        msi.flags = 0;
1412        memset(msi.pad, 0, sizeof(msi.pad));
1413
1414        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
1415    }
1416
1417    route = kvm_lookup_msi_route(s, msg);
1418    if (!route) {
1419        int virq;
1420
1421        virq = kvm_irqchip_get_virq(s);
1422        if (virq < 0) {
1423            return virq;
1424        }
1425
1426        route = g_malloc0(sizeof(KVMMSIRoute));
1427        route->kroute.gsi = virq;
1428        route->kroute.type = KVM_IRQ_ROUTING_MSI;
1429        route->kroute.flags = 0;
1430        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1431        route->kroute.u.msi.address_hi = msg.address >> 32;
1432        route->kroute.u.msi.data = le32_to_cpu(msg.data);
1433
1434        kvm_add_routing_entry(s, &route->kroute);
1435        kvm_irqchip_commit_routes(s);
1436
1437        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1438                           entry);
1439    }
1440
1441    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1442
1443    return kvm_set_irq(s, route->kroute.gsi, 1);
1444}
1445
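/*
 * Build an MSI route for @vector of @dev (or for an empty message when
 * no device is given).  With GSI direct mapping the MSI data already is
 * the GSI; otherwise a virq is allocated, an MSI routing entry is
 * installed and committed, and the virq is returned.
 */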
1446int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1447{
1448    struct kvm_irq_routing_entry kroute = {};
1449    int virq;
1450    MSIMessage msg = {0, 0};
1451
1452    if (pci_available && dev) {
1453        msg = pci_get_msi_message(dev, vector);
1454    }
1455
1456    if (kvm_gsi_direct_mapping()) {
1457        return kvm_arch_msi_data_to_gsi(msg.data);
1458    }
1459
1460    if (!kvm_gsi_routing_enabled()) {
1461        return -ENOSYS;
1462    }
1463
1464    virq = kvm_irqchip_get_virq(s);
1465    if (virq < 0) {
1466        return virq;
1467    }
1468
1469    kroute.gsi = virq;
1470    kroute.type = KVM_IRQ_ROUTING_MSI;
1471    kroute.flags = 0;
1472    kroute.u.msi.address_lo = (uint32_t)msg.address;
1473    kroute.u.msi.address_hi = msg.address >> 32;
1474    kroute.u.msi.data = le32_to_cpu(msg.data);
1475    if (pci_available && kvm_msi_devid_required()) {
1476        kroute.flags = KVM_MSI_VALID_DEVID;
1477        kroute.u.msi.devid = pci_requester_id(dev);
1478    }
1479    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1480        kvm_irqchip_release_virq(s, virq);
1481        return -EINVAL;
1482    }
1483
1484    trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
1485                                    vector, virq);
1486
1487    kvm_add_routing_entry(s, &kroute);
1488    kvm_arch_add_msi_route_post(&kroute, vector, dev);
1489    kvm_irqchip_commit_routes(s);
1490
1491    return virq;
1492}
1493
1494int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
1495                                 PCIDevice *dev)
1496{
1497    struct kvm_irq_routing_entry kroute = {};
1498
1499    if (kvm_gsi_direct_mapping()) {
1500        return 0;
1501    }
1502
1503    if (!kvm_irqchip_in_kernel()) {
1504        return -ENOSYS;
1505    }
1506
1507    kroute.gsi = virq;
1508    kroute.type = KVM_IRQ_ROUTING_MSI;
1509    kroute.flags = 0;
1510    kroute.u.msi.address_lo = (uint32_t)msg.address;
1511    kroute.u.msi.address_hi = msg.address >> 32;
1512    kroute.u.msi.data = le32_to_cpu(msg.data);
1513    if (pci_available && kvm_msi_devid_required()) {
1514        kroute.flags = KVM_MSI_VALID_DEVID;
1515        kroute.u.msi.devid = pci_requester_id(dev);
1516    }
1517    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1518        return -EINVAL;
1519    }
1520
1521    trace_kvm_irqchip_update_msi_route(virq);
1522
1523    return kvm_update_routing_entry(s, &kroute);
1524}
1525
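/*
 * Bind (or, with assign == false, unbind) an eventfd to a GSI using
 * KVM_IRQFD.  When rfd is not -1 it is wired up as the resample fd
 * (KVM_IRQFD_FLAG_RESAMPLE), as used for level-triggered interrupts.
 */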
1526static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
1527                                    bool assign)
1528{
1529    struct kvm_irqfd irqfd = {
1530        .fd = fd,
1531        .gsi = virq,
1532        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
1533    };
1534
1535    if (rfd != -1) {
1536        irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
1537        irqfd.resamplefd = rfd;
1538    }
1539
1540    if (!kvm_irqfds_enabled()) {
1541        return -ENOSYS;
1542    }
1543
1544    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
1545}
1546
1547int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1548{
1549    struct kvm_irq_routing_entry kroute = {};
1550    int virq;
1551
1552    if (!kvm_gsi_routing_enabled()) {
1553        return -ENOSYS;
1554    }
1555
1556    virq = kvm_irqchip_get_virq(s);
1557    if (virq < 0) {
1558        return virq;
1559    }
1560
1561    kroute.gsi = virq;
1562    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
1563    kroute.flags = 0;
1564    kroute.u.adapter.summary_addr = adapter->summary_addr;
1565    kroute.u.adapter.ind_addr = adapter->ind_addr;
1566    kroute.u.adapter.summary_offset = adapter->summary_offset;
1567    kroute.u.adapter.ind_offset = adapter->ind_offset;
1568    kroute.u.adapter.adapter_id = adapter->adapter_id;
1569
1570    kvm_add_routing_entry(s, &kroute);
1571
1572    return virq;
1573}
1574
1575int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1576{
1577    struct kvm_irq_routing_entry kroute = {};
1578    int virq;
1579
1580    if (!kvm_gsi_routing_enabled()) {
1581        return -ENOSYS;
1582    }
1583    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
1584        return -ENOSYS;
1585    }
1586    virq = kvm_irqchip_get_virq(s);
1587    if (virq < 0) {
1588        return virq;
1589    }
1590
1591    kroute.gsi = virq;
1592    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
1593    kroute.flags = 0;
1594    kroute.u.hv_sint.vcpu = vcpu;
1595    kroute.u.hv_sint.sint = sint;
1596
1597    kvm_add_routing_entry(s, &kroute);
1598    kvm_irqchip_commit_routes(s);
1599
1600    return virq;
1601}
1602
1603#else /* !KVM_CAP_IRQ_ROUTING */
1604
1605void kvm_init_irq_routing(KVMState *s)
1606{
1607}
1608
1609void kvm_irqchip_release_virq(KVMState *s, int virq)
1610{
1611}
1612
1613int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1614{
1615    abort();
1616}
1617
1618int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1619{
1620    return -ENOSYS;
1621}
1622
1623int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1624{
1625    return -ENOSYS;
1626}
1627
1628int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1629{
1630    return -ENOSYS;
1631}
1632
1633static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
1634{
1635    abort();
1636}
1637
1638int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
1639{
1640    return -ENOSYS;
1641}
1642#endif /* !KVM_CAP_IRQ_ROUTING */
1643
1644int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1645                                       EventNotifier *rn, int virq)
1646{
1647    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
1648           rn ? event_notifier_get_fd(rn) : -1, virq, true);
1649}
1650
1651int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1652                                          int virq)
1653{
1654    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
1655           false);
1656}
1657
1658int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
1659                                   EventNotifier *rn, qemu_irq irq)
1660{
1661    gpointer key, gsi;
1662    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1663
1664    if (!found) {
1665        return -ENXIO;
1666    }
1667    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
1668}
1669
1670int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
1671                                      qemu_irq irq)
1672{
1673    gpointer key, gsi;
1674    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1675
1676    if (!found) {
1677        return -ENXIO;
1678    }
1679    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
1680}
1681
1682void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
1683{
1684    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
1685}
1686
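/*
 * Create the in-kernel irqchip if the kernel offers one (KVM_CAP_IRQCHIP,
 * or KVM_CAP_S390_IRQCHIP enabled via a vm capability), then set up GSI
 * routing and the qemu_irq -> GSI hash table.
 */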
1687static void kvm_irqchip_create(MachineState *machine, KVMState *s)
1688{
1689    int ret;
1690
1691    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
1692        ;
1693    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
1694        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
1695        if (ret < 0) {
1696            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
1697            exit(1);
1698        }
1699    } else {
1700        return;
1701    }
1702
1703    /* First probe and see if there's an arch-specific hook to create the
1704     * in-kernel irqchip for us */
1705    ret = kvm_arch_irqchip_create(machine, s);
1706    if (ret == 0) {
1707        if (machine_kernel_irqchip_split(machine)) {
1708            perror("Split IRQ chip mode not supported.");
1709            exit(1);
1710        } else {
1711            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
1712        }
1713    }
1714    if (ret < 0) {
1715        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
1716        exit(1);
1717    }
1718
1719    kvm_kernel_irqchip = true;
1720    /* If we have an in-kernel IRQ chip then we must have asynchronous
1721     * interrupt delivery (though the reverse is not necessarily true)
1722     */
1723    kvm_async_interrupts_allowed = true;
1724    kvm_halt_in_kernel_allowed = true;
1725
1726    kvm_init_irq_routing(s);
1727
1728    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
1729}
1730
1731/* Find the number of supported CPUs using the recommended
1732 * procedure from the kernel API documentation to cope with
1733 * older kernels that may be missing capabilities.
1734 */
1735static int kvm_recommended_vcpus(KVMState *s)
1736{
1737    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
1738    return (ret) ? ret : 4;
1739}
1740
1741static int kvm_max_vcpus(KVMState *s)
1742{
1743    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
1744    return (ret) ? ret : kvm_recommended_vcpus(s);
1745}
1746
1747static int kvm_max_vcpu_id(KVMState *s)
1748{
1749    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
1750    return (ret) ? ret : kvm_max_vcpus(s);
1751}
1752
1753bool kvm_vcpu_id_is_valid(int vcpu_id)
1754{
1755    KVMState *s = KVM_STATE(current_machine->accelerator);
1756    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
1757}
1758
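/*
 * Accelerator init entry point: open /dev/kvm, check the API version and
 * required capabilities, create the VM (honouring any "kvm-type" machine
 * option) and probe the optional features recorded in KVMState.
 */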
1759static int kvm_init(MachineState *ms)
1760{
1761    MachineClass *mc = MACHINE_GET_CLASS(ms);
1762    static const char upgrade_note[] =
1763        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
1764        "(see http://sourceforge.net/projects/kvm).\n";
1765    struct {
1766        const char *name;
1767        int num;
1768    } num_cpus[] = {
1769        { "SMP",          ms->smp.cpus },
1770        { "hotpluggable", ms->smp.max_cpus },
1771        { NULL, }
1772    }, *nc = num_cpus;
1773    int soft_vcpus_limit, hard_vcpus_limit;
1774    KVMState *s;
1775    const KVMCapabilityInfo *missing_cap;
1776    int ret;
1777    int type = 0;
1778    const char *kvm_type;
1779
1780    s = KVM_STATE(ms->accelerator);
1781
1782    /*
1783     * On systems where the kernel can support different base page
1784     * sizes, host page size may be different from TARGET_PAGE_SIZE,
1785     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
1786     * page size for the system though.
1787     */
1788    assert(TARGET_PAGE_SIZE <= getpagesize());
1789
1790    s->sigmask_len = 8;
1791
1792#ifdef KVM_CAP_SET_GUEST_DEBUG
1793    QTAILQ_INIT(&s->kvm_sw_breakpoints);
1794#endif
1795    QLIST_INIT(&s->kvm_parked_vcpus);
1796    s->vmfd = -1;
1797    s->fd = qemu_open("/dev/kvm", O_RDWR);
1798    if (s->fd == -1) {
1799        fprintf(stderr, "Could not access KVM kernel module: %m\n");
1800        ret = -errno;
1801        goto err;
1802    }
1803
1804    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
1805    if (ret < KVM_API_VERSION) {
1806        if (ret >= 0) {
1807            ret = -EINVAL;
1808        }
1809        fprintf(stderr, "kvm version too old\n");
1810        goto err;
1811    }
1812
1813    if (ret > KVM_API_VERSION) {
1814        ret = -EINVAL;
1815        fprintf(stderr, "kvm version not supported\n");
1816        goto err;
1817    }
1818
1819    kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
1820    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
1821
1822    /* If unspecified, use the default value */
1823    if (!s->nr_slots) {
1824        s->nr_slots = 32;
1825    }
1826
1827    s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
1828    if (s->nr_as <= 1) {
1829        s->nr_as = 1;
1830    }
1831    s->as = g_new0(struct KVMAs, s->nr_as);
1832
1833    kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
1834    if (mc->kvm_type) {
1835        type = mc->kvm_type(ms, kvm_type);
1836    } else if (kvm_type) {
1837        ret = -EINVAL;
1838        fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
1839        goto err;
1840    }
1841
1842    do {
1843        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
1844    } while (ret == -EINTR);
1845
1846    if (ret < 0) {
1847        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
1848                strerror(-ret));
1849
1850#ifdef TARGET_S390X
1851        if (ret == -EINVAL) {
1852            fprintf(stderr,
1853                    "Host kernel setup problem detected. Please verify:\n");
1854            fprintf(stderr, "- for kernels supporting the switch_amode or"
1855                    " user_mode parameters, whether\n");
1856            fprintf(stderr,
1857                    "  user space is running in primary address space\n");
1858            fprintf(stderr,
1859                    "- for kernels supporting the vm.allocate_pgste sysctl, "
1860                    "whether it is enabled\n");
1861        }
1862#endif
1863        goto err;
1864    }
1865
1866    s->vmfd = ret;
1867
1868    /* check the vcpu limits */
1869    soft_vcpus_limit = kvm_recommended_vcpus(s);
1870    hard_vcpus_limit = kvm_max_vcpus(s);
1871
1872    while (nc->name) {
1873        if (nc->num > soft_vcpus_limit) {
1874            warn_report("Number of %s cpus requested (%d) exceeds "
1875                        "the recommended number of cpus supported by KVM (%d)",
1876                        nc->name, nc->num, soft_vcpus_limit);
1877
1878            if (nc->num > hard_vcpus_limit) {
1879                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
1880                        "the maximum number of cpus supported by KVM (%d)\n",
1881                        nc->name, nc->num, hard_vcpus_limit);
1882                exit(1);
1883            }
1884        }
1885        nc++;
1886    }
1887
1888    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
1889    if (!missing_cap) {
1890        missing_cap =
1891            kvm_check_extension_list(s, kvm_arch_required_capabilities);
1892    }
1893    if (missing_cap) {
1894        ret = -EINVAL;
1895        fprintf(stderr, "kvm does not support %s\n%s",
1896                missing_cap->name, upgrade_note);
1897        goto err;
1898    }
1899
1900    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
1901    s->coalesced_pio = s->coalesced_mmio &&
1902                       kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
1903
1904    s->manual_dirty_log_protect =
1905        kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
1906    if (s->manual_dirty_log_protect) {
1907        ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
1908        if (ret) {
1909            warn_report("Trying to enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 "
1910                        "but failed. Falling back to the legacy mode.");
1911            s->manual_dirty_log_protect = false;
1912        }
1913    }
1914
1915#ifdef KVM_CAP_VCPU_EVENTS
1916    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
1917#endif
1918
1919    s->robust_singlestep =
1920        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
1921
1922#ifdef KVM_CAP_DEBUGREGS
1923    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
1924#endif
1925
1926    s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
1927
1928#ifdef KVM_CAP_IRQ_ROUTING
1929    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
1930#endif
1931
1932    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
1933
1934    s->irq_set_ioctl = KVM_IRQ_LINE;
1935    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
1936        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
1937    }
1938
1939    kvm_readonly_mem_allowed =
1940        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
1941
1942    kvm_eventfds_allowed =
1943        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
1944
1945    kvm_irqfds_allowed =
1946        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
1947
1948    kvm_resamplefds_allowed =
1949        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
1950
1951    kvm_vm_attributes_allowed =
1952        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
1953
1954    kvm_ioeventfd_any_length_allowed =
1955        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
1956
1957    kvm_state = s;
1958
1959    /*
1960     * If a memory encryption object is specified, initialize the memory
1961     * encryption context.
1962     */
1963    if (ms->memory_encryption) {
1964        kvm_state->memcrypt_handle = sev_guest_init(ms->memory_encryption);
1965        if (!kvm_state->memcrypt_handle) {
1966            ret = -1;
1967            goto err;
1968        }
1969
1970        kvm_state->memcrypt_encrypt_data = sev_encrypt_data;
1971    }
1972
1973    ret = kvm_arch_init(ms, s);
1974    if (ret < 0) {
1975        goto err;
1976    }
1977
1978    if (machine_kernel_irqchip_allowed(ms)) {
1979        kvm_irqchip_create(ms, s);
1980    }
1981
1982    if (kvm_eventfds_allowed) {
1983        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
1984        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
1985    }
1986    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
1987    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
1988
1989    kvm_memory_listener_register(s, &s->memory_listener,
1990                                 &address_space_memory, 0);
1991    memory_listener_register(&kvm_io_listener,
1992                             &address_space_io);
1993    memory_listener_register(&kvm_coalesced_pio_listener,
1994                             &address_space_io);
1995
1996    s->many_ioeventfds = kvm_check_many_ioeventfds();
1997
1998    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
1999    if (!s->sync_mmu) {
2000        qemu_balloon_inhibit(true);
2001    }
2002
2003    return 0;
2004
2005err:
2006    assert(ret < 0);
2007    if (s->vmfd >= 0) {
2008        close(s->vmfd);
2009    }
2010    if (s->fd != -1) {
2011        close(s->fd);
2012    }
2013    g_free(s->memory_listener.slots);
2014
2015    return ret;
2016}
2017
2018void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2019{
2020    s->sigmask_len = sigmask_len;
2021}
2022
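/*
 * Complete a KVM_EXIT_IO: the kernel batches 'count' port accesses of
 * 'size' bytes each in the run area (at run->io.data_offset); replay
 * them on address_space_io.  Called outside the BQL from kvm_cpu_exec.
 */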
2023static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2024                          int size, uint32_t count)
2025{
2026    int i;
2027    uint8_t *ptr = data;
2028
2029    for (i = 0; i < count; i++) {
2030        address_space_rw(&address_space_io, port, attrs,
2031                         ptr, size,
2032                         direction == KVM_EXIT_IO_OUT);
2033        ptr += size;
2034    }
2035}
2036
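/*
 * Report a KVM_EXIT_INTERNAL_ERROR: dump the suberror and, when
 * KVM_CAP_INTERNAL_ERROR_DATA is available, the extra data words.  For
 * emulation failures the arch hook decides whether execution may
 * continue (EXCP_INTERRUPT); otherwise return -1 so the caller stops
 * the VM.
 */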
2037static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2038{
2039    fprintf(stderr, "KVM internal error. Suberror: %d\n",
2040            run->internal.suberror);
2041
2042    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
2043        int i;
2044
2045        for (i = 0; i < run->internal.ndata; ++i) {
2046            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
2047                    i, (uint64_t)run->internal.data[i]);
2048        }
2049    }
2050    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2051        fprintf(stderr, "emulation failure\n");
2052        if (!kvm_arch_stop_on_emulation_error(cpu)) {
2053            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2054            return EXCP_INTERRUPT;
2055        }
2056    }
2057    /* FIXME: Should trigger a QMP message to let management know
2058     * something went wrong.
2059     */
2060    return -1;
2061}
2062
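/*
 * Drain the coalesced MMIO/PIO ring shared with the kernel, replaying
 * each entry as a port access (ent->pio) or a physical memory write.
 * coalesced_flush_in_progress guards against recursion, since the
 * replayed accesses can themselves trigger another flush.
 */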
2063void kvm_flush_coalesced_mmio_buffer(void)
2064{
2065    KVMState *s = kvm_state;
2066
2067    if (s->coalesced_flush_in_progress) {
2068        return;
2069    }
2070
2071    s->coalesced_flush_in_progress = true;
2072
2073    if (s->coalesced_mmio_ring) {
2074        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2075        while (ring->first != ring->last) {
2076            struct kvm_coalesced_mmio *ent;
2077
2078            ent = &ring->coalesced_mmio[ring->first];
2079
2080            if (ent->pio == 1) {
2081                address_space_rw(&address_space_io, ent->phys_addr,
2082                                 MEMTXATTRS_UNSPECIFIED, ent->data,
2083                                 ent->len, true);
2084            } else {
2085                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2086            }
2087            smp_wmb();
2088            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2089        }
2090    }
2091
2092    s->coalesced_flush_in_progress = false;
2093}
2094
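/*
 * Register synchronization between QEMU and KVM is lazy: cpu->vcpu_dirty
 * set means the QEMU-side copy is authoritative and must be written back
 * (kvm_arch_put_registers) before the next KVM_RUN; clear means the
 * kernel holds the current state and kvm_arch_get_registers must run
 * before QEMU may inspect it.  The helpers below implement that protocol
 * for state reads, reset, init and pre-loadvm.
 */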
2095static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2096{
2097    if (!cpu->vcpu_dirty) {
2098        kvm_arch_get_registers(cpu);
2099        cpu->vcpu_dirty = true;
2100    }
2101}
2102
2103void kvm_cpu_synchronize_state(CPUState *cpu)
2104{
2105    if (!cpu->vcpu_dirty) {
2106        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2107    }
2108}
2109
2110static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2111{
2112    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
2113    cpu->vcpu_dirty = false;
2114}
2115
2116void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2117{
2118    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2119}
2120
2121static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2122{
2123    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
2124    cpu->vcpu_dirty = false;
2125}
2126
2127void kvm_cpu_synchronize_post_init(CPUState *cpu)
2128{
2129    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2130}
2131
2132static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2133{
2134    cpu->vcpu_dirty = true;
2135}
2136
2137void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2138{
2139    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2140}
2141
2142#ifdef KVM_HAVE_MCE_INJECTION
2143static __thread void *pending_sigbus_addr;
2144static __thread int pending_sigbus_code;
2145static __thread bool have_sigbus_pending;
2146#endif
2147
2148static void kvm_cpu_kick(CPUState *cpu)
2149{
2150    atomic_set(&cpu->kvm_run->immediate_exit, 1);
2151}
2152
2153static void kvm_cpu_kick_self(void)
2154{
2155    if (kvm_immediate_exit) {
2156        kvm_cpu_kick(current_cpu);
2157    } else {
2158        qemu_cpu_kick_self();
2159    }
2160}
2161
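/*
 * Clear a pending self-kick.  With KVM_CAP_IMMEDIATE_EXIT it is enough
 * to reset run->immediate_exit (the barrier pairs with kvm_cpu_exec);
 * otherwise drain any pending SIG_IPI so it does not keep interrupting
 * the next KVM_RUN.
 */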
2162static void kvm_eat_signals(CPUState *cpu)
2163{
2164    struct timespec ts = { 0, 0 };
2165    siginfo_t siginfo;
2166    sigset_t waitset;
2167    sigset_t chkset;
2168    int r;
2169
2170    if (kvm_immediate_exit) {
2171        atomic_set(&cpu->kvm_run->immediate_exit, 0);
2172        /* Write kvm_run->immediate_exit before the cpu->exit_request
2173         * write in kvm_cpu_exec.
2174         */
2175        smp_wmb();
2176        return;
2177    }
2178
2179    sigemptyset(&waitset);
2180    sigaddset(&waitset, SIG_IPI);
2181
2182    do {
2183        r = sigtimedwait(&waitset, &siginfo, &ts);
2184        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2185            perror("sigtimedwait");
2186            exit(1);
2187        }
2188
2189        r = sigpending(&chkset);
2190        if (r == -1) {
2191            perror("sigpending");
2192            exit(1);
2193        }
2194    } while (sigismember(&chkset, SIG_IPI));
2195}
2196
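/*
 * Main vcpu run loop, entered from the per-vcpu thread: push dirty
 * registers, run kvm_arch_pre_run, kick ourselves if an exit was
 * requested, issue KVM_RUN outside the BQL and dispatch the exit
 * reason.  Loops while the handlers return 0; returns an EXCP_* code,
 * or -1 after dumping CPU state and stopping the VM with
 * RUN_STATE_INTERNAL_ERROR.
 */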
2197int kvm_cpu_exec(CPUState *cpu)
2198{
2199    struct kvm_run *run = cpu->kvm_run;
2200    int ret, run_ret;
2201
2202    DPRINTF("kvm_cpu_exec()\n");
2203
2204    if (kvm_arch_process_async_events(cpu)) {
2205        atomic_set(&cpu->exit_request, 0);
2206        return EXCP_HLT;
2207    }
2208
2209    qemu_mutex_unlock_iothread();
2210    cpu_exec_start(cpu);
2211
2212    do {
2213        MemTxAttrs attrs;
2214
2215        if (cpu->vcpu_dirty) {
2216            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
2217            cpu->vcpu_dirty = false;
2218        }
2219
2220        kvm_arch_pre_run(cpu, run);
2221        if (atomic_read(&cpu->exit_request)) {
2222            DPRINTF("interrupt exit requested\n");
2223            /*
2224             * KVM requires us to reenter the kernel after IO exits to complete
2225             * instruction emulation. This self-signal will ensure that we
2226             * leave ASAP again.
2227             */
2228            kvm_cpu_kick_self();
2229        }
2230
2231        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
2232         * Matching barrier in kvm_eat_signals.
2233         */
2234        smp_rmb();
2235
2236        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
2237
2238        attrs = kvm_arch_post_run(cpu, run);
2239
2240#ifdef KVM_HAVE_MCE_INJECTION
2241        if (unlikely(have_sigbus_pending)) {
2242            qemu_mutex_lock_iothread();
2243            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
2244                                    pending_sigbus_addr);
2245            have_sigbus_pending = false;
2246            qemu_mutex_unlock_iothread();
2247        }
2248#endif
2249
2250        if (run_ret < 0) {
2251            if (run_ret == -EINTR || run_ret == -EAGAIN) {
2252                DPRINTF("io window exit\n");
2253                kvm_eat_signals(cpu);
2254                ret = EXCP_INTERRUPT;
2255                break;
2256            }
2257            fprintf(stderr, "error: kvm run failed %s\n",
2258                    strerror(-run_ret));
2259#ifdef TARGET_PPC
2260            if (run_ret == -EBUSY) {
2261                fprintf(stderr,
2262                        "This is probably because your SMT is enabled.\n"
2263                        "VCPU can only run on primary threads with all "
2264                        "secondary threads offline.\n");
2265            }
2266#endif
2267            ret = -1;
2268            break;
2269        }
2270
2271        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
2272        switch (run->exit_reason) {
2273        case KVM_EXIT_IO:
2274            DPRINTF("handle_io\n");
2275            /* Called outside BQL */
2276            kvm_handle_io(run->io.port, attrs,
2277                          (uint8_t *)run + run->io.data_offset,
2278                          run->io.direction,
2279                          run->io.size,
2280                          run->io.count);
2281            ret = 0;
2282            break;
2283        case KVM_EXIT_MMIO:
2284            DPRINTF("handle_mmio\n");
2285            /* Called outside BQL */
2286            address_space_rw(&address_space_memory,
2287                             run->mmio.phys_addr, attrs,
2288                             run->mmio.data,
2289                             run->mmio.len,
2290                             run->mmio.is_write);
2291            ret = 0;
2292            break;
2293        case KVM_EXIT_IRQ_WINDOW_OPEN:
2294            DPRINTF("irq_window_open\n");
2295            ret = EXCP_INTERRUPT;
2296            break;
2297        case KVM_EXIT_SHUTDOWN:
2298            DPRINTF("shutdown\n");
2299            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2300            ret = EXCP_INTERRUPT;
2301            break;
2302        case KVM_EXIT_UNKNOWN:
2303            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
2304                    (uint64_t)run->hw.hardware_exit_reason);
2305            ret = -1;
2306            break;
2307        case KVM_EXIT_INTERNAL_ERROR:
2308            ret = kvm_handle_internal_error(cpu, run);
2309            break;
2310        case KVM_EXIT_SYSTEM_EVENT:
2311            switch (run->system_event.type) {
2312            case KVM_SYSTEM_EVENT_SHUTDOWN:
2313                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
2314                ret = EXCP_INTERRUPT;
2315                break;
2316            case KVM_SYSTEM_EVENT_RESET:
2317                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2318                ret = EXCP_INTERRUPT;
2319                break;
2320            case KVM_SYSTEM_EVENT_CRASH:
2321                kvm_cpu_synchronize_state(cpu);
2322                qemu_mutex_lock_iothread();
2323                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
2324                qemu_mutex_unlock_iothread();
2325                ret = 0;
2326                break;
2327            default:
2328                DPRINTF("kvm_arch_handle_exit\n");
2329                ret = kvm_arch_handle_exit(cpu, run);
2330                break;
2331            }
2332            break;
2333        default:
2334            DPRINTF("kvm_arch_handle_exit\n");
2335            ret = kvm_arch_handle_exit(cpu, run);
2336            break;
2337        }
2338    } while (ret == 0);
2339
2340    cpu_exec_end(cpu);
2341    qemu_mutex_lock_iothread();
2342
2343    if (ret < 0) {
2344        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2345        vm_stop(RUN_STATE_INTERNAL_ERROR);
2346    }
2347
2348    atomic_set(&cpu->exit_request, 0);
2349    return ret;
2350}
2351
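/*
 * Thin wrappers around ioctl(2) on the /dev/kvm, VM, vcpu and device
 * file descriptors: trace the call and convert the -1/errno convention
 * into a negative errno return value, so callers can just test ret < 0.
 * Illustrative use (mirroring kvm_vm_check_extension() above):
 *
 *     ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU);
 */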
2352int kvm_ioctl(KVMState *s, int type, ...)
2353{
2354    int ret;
2355    void *arg;
2356    va_list ap;
2357
2358    va_start(ap, type);
2359    arg = va_arg(ap, void *);
2360    va_end(ap);
2361
2362    trace_kvm_ioctl(type, arg);
2363    ret = ioctl(s->fd, type, arg);
2364    if (ret == -1) {
2365        ret = -errno;
2366    }
2367    return ret;
2368}
2369
2370int kvm_vm_ioctl(KVMState *s, int type, ...)
2371{
2372    int ret;
2373    void *arg;
2374    va_list ap;
2375
2376    va_start(ap, type);
2377    arg = va_arg(ap, void *);
2378    va_end(ap);
2379
2380    trace_kvm_vm_ioctl(type, arg);
2381    ret = ioctl(s->vmfd, type, arg);
2382    if (ret == -1) {
2383        ret = -errno;
2384    }
2385    return ret;
2386}
2387
2388int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
2389{
2390    int ret;
2391    void *arg;
2392    va_list ap;
2393
2394    va_start(ap, type);
2395    arg = va_arg(ap, void *);
2396    va_end(ap);
2397
2398    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
2399    ret = ioctl(cpu->kvm_fd, type, arg);
2400    if (ret == -1) {
2401        ret = -errno;
2402    }
2403    return ret;
2404}
2405
2406int kvm_device_ioctl(int fd, int type, ...)
2407{
2408    int ret;
2409    void *arg;
2410    va_list ap;
2411
2412    va_start(ap, type);
2413    arg = va_arg(ap, void *);
2414    va_end(ap);
2415
2416    trace_kvm_device_ioctl(fd, type, arg);
2417    ret = ioctl(fd, type, arg);
2418    if (ret == -1) {
2419        ret = -errno;
2420    }
2421    return ret;
2422}
2423
2424int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
2425{
2426    int ret;
2427    struct kvm_device_attr attribute = {
2428        .group = group,
2429        .attr = attr,
2430    };
2431
2432    if (!kvm_vm_attributes_allowed) {
2433        return 0;
2434    }
2435
2436    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
2437    /* KVM returns 0 on success for KVM_HAS_DEVICE_ATTR */
2438    return ret ? 0 : 1;
2439}
2440
2441int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
2442{
2443    struct kvm_device_attr attribute = {
2444        .group = group,
2445        .attr = attr,
2446        .flags = 0,
2447    };
2448
2449    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
2450}
2451
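/*
 * Read (write == false) or write (write == true) a single device
 * attribute: 'group' and 'attr' identify it and 'val' points at the
 * payload.  Errors are reported through 'errp' and returned as a
 * negative errno.
 */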
2452int kvm_device_access(int fd, int group, uint64_t attr,
2453                      void *val, bool write, Error **errp)
2454{
2455    struct kvm_device_attr kvmattr;
2456    int err;
2457
2458    kvmattr.flags = 0;
2459    kvmattr.group = group;
2460    kvmattr.attr = attr;
2461    kvmattr.addr = (uintptr_t)val;
2462
2463    err = kvm_device_ioctl(fd,
2464                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
2465                           &kvmattr);
2466    if (err < 0) {
2467        error_setg_errno(errp, -err,
2468                         "KVM_%s_DEVICE_ATTR failed: Group %d "
2469                         "attr 0x%016" PRIx64,
2470                         write ? "SET" : "GET", group, attr);
2471    }
2472    return err;
2473}
2474
2475bool kvm_has_sync_mmu(void)
2476{
2477    return kvm_state->sync_mmu;
2478}
2479
2480int kvm_has_vcpu_events(void)
2481{
2482    return kvm_state->vcpu_events;
2483}
2484
2485int kvm_has_robust_singlestep(void)
2486{
2487    return kvm_state->robust_singlestep;
2488}
2489
2490int kvm_has_debugregs(void)
2491{
2492    return kvm_state->debugregs;
2493}
2494
2495int kvm_max_nested_state_length(void)
2496{
2497    return kvm_state->max_nested_state_len;
2498}
2499
2500int kvm_has_many_ioeventfds(void)
2501{
2502    if (!kvm_enabled()) {
2503        return 0;
2504    }
2505    return kvm_state->many_ioeventfds;
2506}
2507
2508int kvm_has_gsi_routing(void)
2509{
2510#ifdef KVM_CAP_IRQ_ROUTING
2511    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
2512#else
2513    return false;
2514#endif
2515}
2516
2517int kvm_has_intx_set_mask(void)
2518{
2519    return kvm_state->intx_set_mask;
2520}
2521
2522bool kvm_arm_supports_user_irq(void)
2523{
2524    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
2525}
2526
2527#ifdef KVM_CAP_SET_GUEST_DEBUG
2528struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
2529                                                 target_ulong pc)
2530{
2531    struct kvm_sw_breakpoint *bp;
2532
2533    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
2534        if (bp->pc == pc) {
2535            return bp;
2536        }
2537    }
2538    return NULL;
2539}
2540
2541int kvm_sw_breakpoints_active(CPUState *cpu)
2542{
2543    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
2544}
2545
2546struct kvm_set_guest_debug_data {
2547    struct kvm_guest_debug dbg;
2548    int err;
2549};
2550
2551static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
2552{
2553    struct kvm_set_guest_debug_data *dbg_data =
2554        (struct kvm_set_guest_debug_data *) data.host_ptr;
2555
2556    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
2557                                   &dbg_data->dbg);
2558}
2559
2560int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2561{
2562    struct kvm_set_guest_debug_data data;
2563
2564    data.dbg.control = reinject_trap;
2565
2566    if (cpu->singlestep_enabled) {
2567        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
2568    }
2569    kvm_arch_update_guest_debug(cpu, &data.dbg);
2570
2571    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
2572               RUN_ON_CPU_HOST_PTR(&data));
2573    return data.err;
2574}
2575
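/*
 * GDB breakpoint support: software breakpoints are reference-counted in
 * the per-VM kvm_sw_breakpoints list and patched in by
 * kvm_arch_insert_sw_breakpoint(); hardware breakpoints and watchpoints
 * are delegated to the arch hooks.  Either way, the new debug state is
 * then pushed to every vcpu via kvm_update_guest_debug().
 */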
2576int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2577                          target_ulong len, int type)
2578{
2579    struct kvm_sw_breakpoint *bp;
2580    int err;
2581
2582    if (type == GDB_BREAKPOINT_SW) {
2583        bp = kvm_find_sw_breakpoint(cpu, addr);
2584        if (bp) {
2585            bp->use_count++;
2586            return 0;
2587        }
2588
2589        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
2590        bp->pc = addr;
2591        bp->use_count = 1;
2592        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
2593        if (err) {
2594            g_free(bp);
2595            return err;
2596        }
2597
2598        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2599    } else {
2600        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
2601        if (err) {
2602            return err;
2603        }
2604    }
2605
2606    CPU_FOREACH(cpu) {
2607        err = kvm_update_guest_debug(cpu, 0);
2608        if (err) {
2609            return err;
2610        }
2611    }
2612    return 0;
2613}
2614
2615int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2616                          target_ulong len, int type)
2617{
2618    struct kvm_sw_breakpoint *bp;
2619    int err;
2620
2621    if (type == GDB_BREAKPOINT_SW) {
2622        bp = kvm_find_sw_breakpoint(cpu, addr);
2623        if (!bp) {
2624            return -ENOENT;
2625        }
2626
2627        if (bp->use_count > 1) {
2628            bp->use_count--;
2629            return 0;
2630        }
2631
2632        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
2633        if (err) {
2634            return err;
2635        }
2636
2637        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2638        g_free(bp);
2639    } else {
2640        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
2641        if (err) {
2642            return err;
2643        }
2644    }
2645
2646    CPU_FOREACH(cpu) {
2647        err = kvm_update_guest_debug(cpu, 0);
2648        if (err) {
2649            return err;
2650        }
2651    }
2652    return 0;
2653}
2654
2655void kvm_remove_all_breakpoints(CPUState *cpu)
2656{
2657    struct kvm_sw_breakpoint *bp, *next;
2658    KVMState *s = cpu->kvm_state;
2659    CPUState *tmpcpu;
2660
2661    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
2662        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
2663            /* Try harder to find a CPU that currently sees the breakpoint. */
2664            CPU_FOREACH(tmpcpu) {
2665                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
2666                    break;
2667                }
2668            }
2669        }
2670        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
2671        g_free(bp);
2672    }
2673    kvm_arch_remove_all_hw_breakpoints();
2674
2675    CPU_FOREACH(cpu) {
2676        kvm_update_guest_debug(cpu, 0);
2677    }
2678}
2679
2680#else /* !KVM_CAP_SET_GUEST_DEBUG */
2681
2682int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2683{
2684    return -EINVAL;
2685}
2686
2687int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2688                          target_ulong len, int type)
2689{
2690    return -EINVAL;
2691}
2692
2693int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2694                          target_ulong len, int type)
2695{
2696    return -EINVAL;
2697}
2698
2699void kvm_remove_all_breakpoints(CPUState *cpu)
2700{
2701}
2702#endif /* !KVM_CAP_SET_GUEST_DEBUG */
2703
2704static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
2705{
2706    KVMState *s = kvm_state;
2707    struct kvm_signal_mask *sigmask;
2708    int r;
2709
2710    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
2711
2712    sigmask->len = s->sigmask_len;
2713    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
2714    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
2715    g_free(sigmask);
2716
2717    return r;
2718}
2719
2720static void kvm_ipi_signal(int sig)
2721{
2722    if (current_cpu) {
2723        assert(kvm_immediate_exit);
2724        kvm_cpu_kick(current_cpu);
2725    }
2726}
2727
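/*
 * Per-vcpu signal setup: install kvm_ipi_signal() for SIG_IPI.  When
 * KVM_CAP_IMMEDIATE_EXIT is available the handler only needs to set
 * run->immediate_exit, so SIG_IPI is simply unblocked in the thread's
 * mask; otherwise SIG_IPI stays blocked outside of KVM_RUN and
 * KVM_SET_SIGNAL_MASK tells the kernel to unblock it atomically while
 * the vcpu is running.
 */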
2728void kvm_init_cpu_signals(CPUState *cpu)
2729{
2730    int r;
2731    sigset_t set;
2732    struct sigaction sigact;
2733
2734    memset(&sigact, 0, sizeof(sigact));
2735    sigact.sa_handler = kvm_ipi_signal;
2736    sigaction(SIG_IPI, &sigact, NULL);
2737
2738    pthread_sigmask(SIG_BLOCK, NULL, &set);
2739#if defined KVM_HAVE_MCE_INJECTION
2740    sigdelset(&set, SIGBUS);
2741    pthread_sigmask(SIG_SETMASK, &set, NULL);
2742#endif
2743    sigdelset(&set, SIG_IPI);
2744    if (kvm_immediate_exit) {
2745        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
2746    } else {
2747        r = kvm_set_signal_mask(cpu, &set);
2748    }
2749    if (r) {
2750        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
2751        exit(1);
2752    }
2753}
2754
2755/* Called asynchronously in VCPU thread.  */
2756int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
2757{
2758#ifdef KVM_HAVE_MCE_INJECTION
2759    if (have_sigbus_pending) {
2760        return 1;
2761    }
2762    have_sigbus_pending = true;
2763    pending_sigbus_addr = addr;
2764    pending_sigbus_code = code;
2765    atomic_set(&cpu->exit_request, 1);
2766    return 0;
2767#else
2768    return 1;
2769#endif
2770}
2771
2772/* Called synchronously (via signalfd) in main thread.  */
2773int kvm_on_sigbus(int code, void *addr)
2774{
2775#ifdef KVM_HAVE_MCE_INJECTION
2776    /* Action required MCE kills the process if SIGBUS is blocked.  Because
2777     * that's what happens in the I/O thread, where we handle MCE via signalfd,
2778     * we can only get action optional here.
2779     */
2780    assert(code != BUS_MCEERR_AR);
2781    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
2782    return 0;
2783#else
2784    return 1;
2785#endif
2786}
2787
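/*
 * Create an in-kernel device of the given type via KVM_CREATE_DEVICE.
 * With test == true only KVM_CREATE_DEVICE_TEST is performed and 0 is
 * returned on success; otherwise the new device fd is returned.
 * Returns -ENOTSUP if the kernel lacks KVM_CAP_DEVICE_CTRL.
 */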
2788int kvm_create_device(KVMState *s, uint64_t type, bool test)
2789{
2790    int ret;
2791    struct kvm_create_device create_dev;
2792
2793    create_dev.type = type;
2794    create_dev.fd = -1;
2795    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
2796
2797    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
2798        return -ENOTSUP;
2799    }
2800
2801    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
2802    if (ret) {
2803        return ret;
2804    }
2805
2806    return test ? 0 : create_dev.fd;
2807}
2808
2809bool kvm_device_supported(int vmfd, uint64_t type)
2810{
2811    struct kvm_create_device create_dev = {
2812        .type = type,
2813        .fd = -1,
2814        .flags = KVM_CREATE_DEVICE_TEST,
2815    };
2816
2817    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
2818        return false;
2819    }
2820
2821    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
2822}
2823
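/*
 * Access a single architecture register through the KVM_SET_ONE_REG /
 * KVM_GET_ONE_REG interface.  'id' encodes the register; 'source' and
 * 'target' point at the value in QEMU's format.  Failures are traced
 * and the negative errno is returned.
 */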
2824int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
2825{
2826    struct kvm_one_reg reg;
2827    int r;
2828
2829    reg.id = id;
2830    reg.addr = (uintptr_t) source;
2831    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
2832    if (r) {
2833        trace_kvm_failed_reg_set(id, strerror(-r));
2834    }
2835    return r;
2836}
2837
2838int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
2839{
2840    struct kvm_one_reg reg;
2841    int r;
2842
2843    reg.id = id;
2844    reg.addr = (uintptr_t) target;
2845    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
2846    if (r) {
2847        trace_kvm_failed_reg_get(id, strerror(-r));
2848    }
2849    return r;
2850}
2851
2852static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
2853                                 hwaddr start_addr, hwaddr size)
2854{
2855    KVMState *kvm = KVM_STATE(ms->accelerator);
2856    int i;
2857
2858    for (i = 0; i < kvm->nr_as; ++i) {
2859        if (kvm->as[i].as == as && kvm->as[i].ml) {
2860            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
2861                                                    start_addr, size);
2862        }
2863    }
2864
2865    return false;
2866}
2867
2868static void kvm_accel_class_init(ObjectClass *oc, void *data)
2869{
2870    AccelClass *ac = ACCEL_CLASS(oc);
2871    ac->name = "KVM";
2872    ac->init_machine = kvm_init;
2873    ac->has_memory = kvm_accel_has_memory;
2874    ac->allowed = &kvm_allowed;
2875}
2876
2877static const TypeInfo kvm_accel_type = {
2878    .name = TYPE_KVM_ACCEL,
2879    .parent = TYPE_ACCEL,
2880    .class_init = kvm_accel_class_init,
2881    .instance_size = sizeof(KVMState),
2882};
2883
2884static void kvm_type_init(void)
2885{
2886    type_register_static(&kvm_accel_type);
2887}
2888
2889type_init(kvm_type_init);
2890