qemu/accel/kvm/kvm-all.c
   1/*
   2 * QEMU KVM support
   3 *
   4 * Copyright IBM, Corp. 2008
   5 *           Red Hat, Inc. 2008
   6 *
   7 * Authors:
   8 *  Anthony Liguori   <aliguori@us.ibm.com>
   9 *  Glauber Costa     <gcosta@redhat.com>
  10 *
  11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  12 * See the COPYING file in the top-level directory.
  13 *
  14 */
  15
  16#include "qemu/osdep.h"
  17#include <sys/ioctl.h>
  18
  19#include <linux/kvm.h>
  20
  21#include "qemu/atomic.h"
  22#include "qemu/option.h"
  23#include "qemu/config-file.h"
  24#include "qemu/error-report.h"
  25#include "qapi/error.h"
  26#include "hw/pci/msi.h"
  27#include "hw/pci/msix.h"
  28#include "hw/s390x/adapter.h"
  29#include "exec/gdbstub.h"
  30#include "sysemu/kvm_int.h"
  31#include "sysemu/runstate.h"
  32#include "sysemu/cpus.h"
  33#include "sysemu/sysemu.h"
  34#include "qemu/bswap.h"
  35#include "exec/memory.h"
  36#include "exec/ram_addr.h"
  37#include "exec/address-spaces.h"
  38#include "qemu/event_notifier.h"
  39#include "qemu/main-loop.h"
  40#include "trace.h"
  41#include "hw/irq.h"
  42#include "sysemu/sev.h"
  43#include "qapi/visitor.h"
  44#include "qapi/qapi-types-common.h"
  45#include "qapi/qapi-visit-common.h"
  46#include "sysemu/reset.h"
  47#include "qemu/guest-random.h"
  48#include "sysemu/hw_accel.h"
  49#include "kvm-cpus.h"
  50
  51#include "hw/boards.h"
  52
  53/* This check must be after config-host.h is included */
  54#ifdef CONFIG_EVENTFD
  55#include <sys/eventfd.h>
  56#endif
  57
  58/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
  59 * need to use the real host PAGE_SIZE, as that's what KVM will use.
  60 */
  61#define PAGE_SIZE qemu_real_host_page_size
  62
  63//#define DEBUG_KVM
  64
  65#ifdef DEBUG_KVM
  66#define DPRINTF(fmt, ...) \
  67    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
  68#else
  69#define DPRINTF(fmt, ...) \
  70    do { } while (0)
  71#endif
  72
  73#define KVM_MSI_HASHTAB_SIZE    256
  74
  75struct KVMParkedVcpu {
  76    unsigned long vcpu_id;
  77    int kvm_fd;
  78    QLIST_ENTRY(KVMParkedVcpu) node;
  79};
  80
  81struct KVMState
  82{
  83    AccelState parent_obj;
  84
  85    int nr_slots;
  86    int fd;
  87    int vmfd;
  88    int coalesced_mmio;
  89    int coalesced_pio;
  90    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
  91    bool coalesced_flush_in_progress;
  92    int vcpu_events;
  93    int robust_singlestep;
  94    int debugregs;
  95#ifdef KVM_CAP_SET_GUEST_DEBUG
  96    QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
  97#endif
  98    int max_nested_state_len;
  99    int many_ioeventfds;
 100    int intx_set_mask;
 101    int kvm_shadow_mem;
 102    bool kernel_irqchip_allowed;
 103    bool kernel_irqchip_required;
 104    OnOffAuto kernel_irqchip_split;
 105    bool sync_mmu;
 106    uint64_t manual_dirty_log_protect;
  107    /* The man page (and POSIX) say ioctl numbers are signed int, but
 108     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
 109     * unsigned, and treating them as signed here can break things */
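         /*
          * Illustrative note (added): ioctls encoded with the _IOC_READ
          * direction, e.g. KVM_IRQ_LINE_STATUS, have bit 31 set on x86, so
          * they would appear negative if stored in a signed int.
          */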
 110    unsigned irq_set_ioctl;
 111    unsigned int sigmask_len;
 112    GHashTable *gsimap;
 113#ifdef KVM_CAP_IRQ_ROUTING
 114    struct kvm_irq_routing *irq_routes;
 115    int nr_allocated_irq_routes;
 116    unsigned long *used_gsi_bitmap;
 117    unsigned int gsi_count;
 118    QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
 119#endif
 120    KVMMemoryListener memory_listener;
 121    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
 122
 123    /* memory encryption */
 124    void *memcrypt_handle;
 125    int (*memcrypt_encrypt_data)(void *handle, uint8_t *ptr, uint64_t len);
 126
 127    /* For "info mtree -f" to tell if an MR is registered in KVM */
 128    int nr_as;
 129    struct KVMAs {
 130        KVMMemoryListener *ml;
 131        AddressSpace *as;
 132    } *as;
 133};
 134
 135KVMState *kvm_state;
 136bool kvm_kernel_irqchip;
 137bool kvm_split_irqchip;
 138bool kvm_async_interrupts_allowed;
 139bool kvm_halt_in_kernel_allowed;
 140bool kvm_eventfds_allowed;
 141bool kvm_irqfds_allowed;
 142bool kvm_resamplefds_allowed;
 143bool kvm_msi_via_irqfd_allowed;
 144bool kvm_gsi_routing_allowed;
 145bool kvm_gsi_direct_mapping;
 146bool kvm_allowed;
 147bool kvm_readonly_mem_allowed;
 148bool kvm_vm_attributes_allowed;
 149bool kvm_direct_msi_allowed;
 150bool kvm_ioeventfd_any_length_allowed;
 151bool kvm_msi_use_devid;
 152static bool kvm_immediate_exit;
 153static hwaddr kvm_max_slot_size = ~0;
 154
 155static const KVMCapabilityInfo kvm_required_capabilites[] = {
 156    KVM_CAP_INFO(USER_MEMORY),
 157    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
 158    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
 159    KVM_CAP_LAST_INFO
 160};
 161
 162static NotifierList kvm_irqchip_change_notifiers =
 163    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
 164
 165struct KVMResampleFd {
 166    int gsi;
 167    EventNotifier *resample_event;
 168    QLIST_ENTRY(KVMResampleFd) node;
 169};
 170typedef struct KVMResampleFd KVMResampleFd;
 171
 172/*
 173 * Only used with split irqchip where we need to do the resample fd
 174 * kick for the kernel from userspace.
 175 */
 176static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
 177    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
 178
 179#define kvm_slots_lock(kml)      qemu_mutex_lock(&(kml)->slots_lock)
 180#define kvm_slots_unlock(kml)    qemu_mutex_unlock(&(kml)->slots_lock)
 181
 182static inline void kvm_resample_fd_remove(int gsi)
 183{
 184    KVMResampleFd *rfd;
 185
 186    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
 187        if (rfd->gsi == gsi) {
 188            QLIST_REMOVE(rfd, node);
 189            g_free(rfd);
 190            break;
 191        }
 192    }
 193}
 194
 195static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
 196{
 197    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);
 198
 199    rfd->gsi = gsi;
 200    rfd->resample_event = event;
 201
 202    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
 203}
 204
 205void kvm_resample_fd_notify(int gsi)
 206{
 207    KVMResampleFd *rfd;
 208
 209    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
 210        if (rfd->gsi == gsi) {
 211            event_notifier_set(rfd->resample_event);
 212            trace_kvm_resample_fd_notify(gsi);
 213            return;
 214        }
 215    }
 216}
 217
 218int kvm_get_max_memslots(void)
 219{
 220    KVMState *s = KVM_STATE(current_accel());
 221
 222    return s->nr_slots;
 223}
 224
 225bool kvm_memcrypt_enabled(void)
 226{
 227    if (kvm_state && kvm_state->memcrypt_handle) {
 228        return true;
 229    }
 230
 231    return false;
 232}
 233
 234int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
 235{
 236    if (kvm_state->memcrypt_handle &&
 237        kvm_state->memcrypt_encrypt_data) {
 238        return kvm_state->memcrypt_encrypt_data(kvm_state->memcrypt_handle,
 239                                              ptr, len);
 240    }
 241
 242    return 1;
 243}
 244
 245/* Called with KVMMemoryListener.slots_lock held */
 246static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 247{
 248    KVMState *s = kvm_state;
 249    int i;
 250
 251    for (i = 0; i < s->nr_slots; i++) {
 252        if (kml->slots[i].memory_size == 0) {
 253            return &kml->slots[i];
 254        }
 255    }
 256
 257    return NULL;
 258}
 259
 260bool kvm_has_free_slot(MachineState *ms)
 261{
 262    KVMState *s = KVM_STATE(ms->accelerator);
 263    bool result;
 264    KVMMemoryListener *kml = &s->memory_listener;
 265
 266    kvm_slots_lock(kml);
 267    result = !!kvm_get_free_slot(kml);
 268    kvm_slots_unlock(kml);
 269
 270    return result;
 271}
 272
 273/* Called with KVMMemoryListener.slots_lock held */
 274static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
 275{
 276    KVMSlot *slot = kvm_get_free_slot(kml);
 277
 278    if (slot) {
 279        return slot;
 280    }
 281
 282    fprintf(stderr, "%s: no free slot available\n", __func__);
 283    abort();
 284}
 285
 286static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
 287                                         hwaddr start_addr,
 288                                         hwaddr size)
 289{
 290    KVMState *s = kvm_state;
 291    int i;
 292
 293    for (i = 0; i < s->nr_slots; i++) {
 294        KVMSlot *mem = &kml->slots[i];
 295
 296        if (start_addr == mem->start_addr && size == mem->memory_size) {
 297            return mem;
 298        }
 299    }
 300
 301    return NULL;
 302}
 303
 304/*
 305 * Calculate and align the start address and the size of the section.
 306 * Return the size. If the size is 0, the aligned section is empty.
 307 */
 308static hwaddr kvm_align_section(MemoryRegionSection *section,
 309                                hwaddr *start)
 310{
 311    hwaddr size = int128_get64(section->size);
 312    hwaddr delta, aligned;
 313
  314    /* kvm works in page size chunks, but the function may be called
  315       with sub-page size and an unaligned start address. Round the start
  316       address up and truncate the size down to page boundaries. */
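         /* Illustrative example (added, assuming a 4 KiB host page size): a
            section at offset 0x1800 with size 0x3000 gives aligned = 0x2000,
            delta = 0x800, and a returned size of (0x3000 - 0x800) & ~0xfff
            = 0x2000. */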
 317    aligned = ROUND_UP(section->offset_within_address_space,
 318                       qemu_real_host_page_size);
 319    delta = aligned - section->offset_within_address_space;
 320    *start = aligned;
 321    if (delta > size) {
 322        return 0;
 323    }
 324
 325    return (size - delta) & qemu_real_host_page_mask;
 326}
 327
 328int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
 329                                       hwaddr *phys_addr)
 330{
 331    KVMMemoryListener *kml = &s->memory_listener;
 332    int i, ret = 0;
 333
 334    kvm_slots_lock(kml);
 335    for (i = 0; i < s->nr_slots; i++) {
 336        KVMSlot *mem = &kml->slots[i];
 337
 338        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
 339            *phys_addr = mem->start_addr + (ram - mem->ram);
 340            ret = 1;
 341            break;
 342        }
 343    }
 344    kvm_slots_unlock(kml);
 345
 346    return ret;
 347}
 348
 349static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
 350{
 351    KVMState *s = kvm_state;
 352    struct kvm_userspace_memory_region mem;
 353    int ret;
 354
 355    mem.slot = slot->slot | (kml->as_id << 16);
 356    mem.guest_phys_addr = slot->start_addr;
 357    mem.userspace_addr = (unsigned long)slot->ram;
 358    mem.flags = slot->flags;
 359
 360    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
 361        /* Set the slot size to 0 before setting the slot to the desired
 362         * value. This is needed based on KVM commit 75d61fbc. */
 363        mem.memory_size = 0;
 364        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 365        if (ret < 0) {
 366            goto err;
 367        }
 368    }
 369    mem.memory_size = slot->memory_size;
 370    ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 371    slot->old_flags = mem.flags;
 372err:
 373    trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
 374                              mem.memory_size, mem.userspace_addr, ret);
 375    if (ret < 0) {
 376        error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
 377                     " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
 378                     __func__, mem.slot, slot->start_addr,
 379                     (uint64_t)mem.memory_size, strerror(errno));
 380    }
 381    return ret;
 382}
 383
 384static int do_kvm_destroy_vcpu(CPUState *cpu)
 385{
 386    KVMState *s = kvm_state;
 387    long mmap_size;
 388    struct KVMParkedVcpu *vcpu = NULL;
 389    int ret = 0;
 390
 391    DPRINTF("kvm_destroy_vcpu\n");
 392
 393    ret = kvm_arch_destroy_vcpu(cpu);
 394    if (ret < 0) {
 395        goto err;
 396    }
 397
 398    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 399    if (mmap_size < 0) {
 400        ret = mmap_size;
 401        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
 402        goto err;
 403    }
 404
 405    ret = munmap(cpu->kvm_run, mmap_size);
 406    if (ret < 0) {
 407        goto err;
 408    }
 409
 410    vcpu = g_malloc0(sizeof(*vcpu));
 411    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
 412    vcpu->kvm_fd = cpu->kvm_fd;
 413    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
 414err:
 415    return ret;
 416}
 417
 418void kvm_destroy_vcpu(CPUState *cpu)
 419{
 420    if (do_kvm_destroy_vcpu(cpu) < 0) {
 421        error_report("kvm_destroy_vcpu failed");
 422        exit(EXIT_FAILURE);
 423    }
 424}
 425
 426static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
 427{
 428    struct KVMParkedVcpu *cpu;
 429
 430    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
 431        if (cpu->vcpu_id == vcpu_id) {
 432            int kvm_fd;
 433
 434            QLIST_REMOVE(cpu, node);
 435            kvm_fd = cpu->kvm_fd;
 436            g_free(cpu);
 437            return kvm_fd;
 438        }
 439    }
 440
 441    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
 442}
 443
 444int kvm_init_vcpu(CPUState *cpu, Error **errp)
 445{
 446    KVMState *s = kvm_state;
 447    long mmap_size;
 448    int ret;
 449
 450    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
 451
 452    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
 453    if (ret < 0) {
 454        error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
 455                         kvm_arch_vcpu_id(cpu));
 456        goto err;
 457    }
 458
 459    cpu->kvm_fd = ret;
 460    cpu->kvm_state = s;
 461    cpu->vcpu_dirty = true;
 462
 463    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 464    if (mmap_size < 0) {
 465        ret = mmap_size;
 466        error_setg_errno(errp, -mmap_size,
 467                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
 468        goto err;
 469    }
 470
 471    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 472                        cpu->kvm_fd, 0);
 473    if (cpu->kvm_run == MAP_FAILED) {
 474        ret = -errno;
 475        error_setg_errno(errp, ret,
 476                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
 477                         kvm_arch_vcpu_id(cpu));
 478        goto err;
 479    }
 480
 481    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
 482        s->coalesced_mmio_ring =
 483            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
 484    }
 485
 486    ret = kvm_arch_init_vcpu(cpu);
 487    if (ret < 0) {
 488        error_setg_errno(errp, -ret,
 489                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
 490                         kvm_arch_vcpu_id(cpu));
 491    }
 492err:
 493    return ret;
 494}
 495
 496/*
 497 * dirty pages logging control
 498 */
 499
 500static int kvm_mem_flags(MemoryRegion *mr)
 501{
 502    bool readonly = mr->readonly || memory_region_is_romd(mr);
 503    int flags = 0;
 504
 505    if (memory_region_get_dirty_log_mask(mr) != 0) {
 506        flags |= KVM_MEM_LOG_DIRTY_PAGES;
 507    }
 508    if (readonly && kvm_readonly_mem_allowed) {
 509        flags |= KVM_MEM_READONLY;
 510    }
 511    return flags;
 512}
 513
 514/* Called with KVMMemoryListener.slots_lock held */
 515static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
 516                                 MemoryRegion *mr)
 517{
 518    mem->flags = kvm_mem_flags(mr);
 519
 520    /* If nothing changed effectively, no need to issue ioctl */
 521    if (mem->flags == mem->old_flags) {
 522        return 0;
 523    }
 524
 525    return kvm_set_user_memory_region(kml, mem, false);
 526}
 527
 528static int kvm_section_update_flags(KVMMemoryListener *kml,
 529                                    MemoryRegionSection *section)
 530{
 531    hwaddr start_addr, size, slot_size;
 532    KVMSlot *mem;
 533    int ret = 0;
 534
 535    size = kvm_align_section(section, &start_addr);
 536    if (!size) {
 537        return 0;
 538    }
 539
 540    kvm_slots_lock(kml);
 541
 542    while (size && !ret) {
 543        slot_size = MIN(kvm_max_slot_size, size);
 544        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
 545        if (!mem) {
 546            /* We don't have a slot if we want to trap every access. */
 547            goto out;
 548        }
 549
 550        ret = kvm_slot_update_flags(kml, mem, section->mr);
 551        start_addr += slot_size;
 552        size -= slot_size;
 553    }
 554
 555out:
 556    kvm_slots_unlock(kml);
 557    return ret;
 558}
 559
 560static void kvm_log_start(MemoryListener *listener,
 561                          MemoryRegionSection *section,
 562                          int old, int new)
 563{
 564    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 565    int r;
 566
 567    if (old != 0) {
 568        return;
 569    }
 570
 571    r = kvm_section_update_flags(kml, section);
 572    if (r < 0) {
 573        abort();
 574    }
 575}
 576
 577static void kvm_log_stop(MemoryListener *listener,
 578                          MemoryRegionSection *section,
 579                          int old, int new)
 580{
 581    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 582    int r;
 583
 584    if (new != 0) {
 585        return;
 586    }
 587
 588    r = kvm_section_update_flags(kml, section);
 589    if (r < 0) {
 590        abort();
 591    }
 592}
 593
 594/* get kvm's dirty pages bitmap and update qemu's */
 595static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
 596                                         unsigned long *bitmap)
 597{
 598    ram_addr_t start = section->offset_within_region +
 599                       memory_region_get_ram_addr(section->mr);
 600    ram_addr_t pages = int128_get64(section->size) / qemu_real_host_page_size;
 601
 602    cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
 603    return 0;
 604}
 605
 606#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
 607
 608/* Allocate the dirty bitmap for a slot  */
 609static void kvm_memslot_init_dirty_bitmap(KVMSlot *mem)
 610{
 611    /*
 612     * XXX bad kernel interface alert
  613     * For the dirty bitmap, the kernel allocates an array whose size is
  614     * aligned to bits-per-long.  But when the kernel is 64-bit and
  615     * userspace is 32-bit, userspace cannot align to the same
  616     * bits-per-long, since sizeof(long) differs between kernel and
  617     * user space.  Userspace would then provide a buffer that may be
  618     * 4 bytes smaller than the one the kernel uses, resulting in
  619     * userspace memory corruption (which, in most cases, valgrind
  620     * cannot detect either).
  621     * So for now, align to 64 instead of HOST_LONG_BITS here, in
  622     * the hope that sizeof(long) won't become >8 any time soon.
 623     */
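         /*
          * Illustrative arithmetic (added, assuming 4 KiB target pages): a
          * 1 GiB slot has (1 GiB >> 12) = 262144 pages, and ALIGN(262144, 64)
          * / 8 = 32768 bytes, i.e. a 32 KiB bitmap with one bit per page.
          */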
 624    hwaddr bitmap_size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
 625                                        /*HOST_LONG_BITS*/ 64) / 8;
 626    mem->dirty_bmap = g_malloc0(bitmap_size);
 627}
 628
 629/**
 630 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 631 *
 632 * This function will first try to fetch dirty bitmap from the kernel,
 633 * and then updates qemu's dirty bitmap.
 634 *
 635 * NOTE: caller must be with kml->slots_lock held.
 636 *
 637 * @kml: the KVM memory listener object
 638 * @section: the memory section to sync the dirty bitmap with
 639 */
 640static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
 641                                          MemoryRegionSection *section)
 642{
 643    KVMState *s = kvm_state;
 644    struct kvm_dirty_log d = {};
 645    KVMSlot *mem;
 646    hwaddr start_addr, size;
 647    hwaddr slot_size, slot_offset = 0;
 648    int ret = 0;
 649
 650    size = kvm_align_section(section, &start_addr);
 651    while (size) {
 652        MemoryRegionSection subsection = *section;
 653
 654        slot_size = MIN(kvm_max_slot_size, size);
 655        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
 656        if (!mem) {
 657            /* We don't have a slot if we want to trap every access. */
 658            goto out;
 659        }
 660
 661        if (!mem->dirty_bmap) {
 662            /* Allocate on the first log_sync, once and for all */
 663            kvm_memslot_init_dirty_bitmap(mem);
 664        }
 665
 666        d.dirty_bitmap = mem->dirty_bmap;
 667        d.slot = mem->slot | (kml->as_id << 16);
 668        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
 669            DPRINTF("ioctl failed %d\n", errno);
 670            ret = -1;
 671            goto out;
 672        }
 673
 674        subsection.offset_within_region += slot_offset;
 675        subsection.size = int128_make64(slot_size);
 676        kvm_get_dirty_pages_log_range(&subsection, d.dirty_bitmap);
 677
 678        slot_offset += slot_size;
 679        start_addr += slot_size;
 680        size -= slot_size;
 681    }
 682out:
 683    return ret;
 684}
 685
 686/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
 687#define KVM_CLEAR_LOG_SHIFT  6
 688#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
 689#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
 690
 691static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
 692                                  uint64_t size)
 693{
 694    KVMState *s = kvm_state;
 695    uint64_t end, bmap_start, start_delta, bmap_npages;
 696    struct kvm_clear_dirty_log d;
 697    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
 698    int ret;
 699
 700    /*
 701     * We need to extend either the start or the size or both to
 702     * satisfy the KVM interface requirement.  Firstly, do the start
 703     * page alignment on 64 host pages
 704     */
 705    bmap_start = start & KVM_CLEAR_LOG_MASK;
 706    start_delta = start - bmap_start;
 707    bmap_start /= psize;
 708
 709    /*
 710     * The kernel interface has restriction on the size too, that either:
 711     *
 712     * (1) the size is 64 host pages aligned (just like the start), or
 713     * (2) the size fills up until the end of the KVM memslot.
 714     */
 715    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
 716        << KVM_CLEAR_LOG_SHIFT;
 717    end = mem->memory_size / psize;
 718    if (bmap_npages > end - bmap_start) {
 719        bmap_npages = end - bmap_start;
 720    }
 721    start_delta /= psize;
 722
 723    /*
 724     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
 725     * that we won't clear any unknown dirty bits otherwise we might
 726     * accidentally clear some set bits which are not yet synced from
 727     * the kernel into QEMU's bitmap, then we'll lose track of the
 728     * guest modifications upon those pages (which can directly lead
 729     * to guest data loss or panic after migration).
 730     *
 731     * Layout of the KVMSlot.dirty_bmap:
 732     *
 733     *                   |<-------- bmap_npages -----------..>|
 734     *                                                     [1]
 735     *                     start_delta         size
 736     *  |----------------|-------------|------------------|------------|
 737     *  ^                ^             ^                               ^
 738     *  |                |             |                               |
 739     * start          bmap_start     (start)                         end
 740     * of memslot                                             of memslot
 741     *
 742     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
 743     */
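         /*
          * Illustrative example (added, assuming a 4 KiB host page size, so
          * KVM_CLEAR_LOG_ALIGN is 64 pages = 256 KiB): for start = 0x42000
          * and size = 0x3000, bmap_start becomes 0x40000 / 4096 = 0x40,
          * start_delta becomes 0x2000 / 4096 = 2, and bmap_npages rounds up
          * to 64 (or is clamped to the end of the memslot if that comes
          * first).
          */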
 744
 745    assert(bmap_start % BITS_PER_LONG == 0);
 746    /* We should never do log_clear before log_sync */
 747    assert(mem->dirty_bmap);
 748    if (start_delta) {
 749        /* Slow path - we need to manipulate a temp bitmap */
 750        bmap_clear = bitmap_new(bmap_npages);
 751        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
 752                                    bmap_start, start_delta + size / psize);
 753        /*
 754         * We need to fill the holes at start because that was not
 755         * specified by the caller and we extended the bitmap only for
 756         * 64 pages alignment
 757         */
 758        bitmap_clear(bmap_clear, 0, start_delta);
 759        d.dirty_bitmap = bmap_clear;
 760    } else {
 761        /* Fast path - start address aligns well with BITS_PER_LONG */
 762        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
 763    }
 764
 765    d.first_page = bmap_start;
 766    /* It should never overflow.  If it happens, say something */
 767    assert(bmap_npages <= UINT32_MAX);
 768    d.num_pages = bmap_npages;
 769    d.slot = mem->slot | (as_id << 16);
 770
 771    if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
 772        ret = -errno;
 773        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
 774                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
 775                     __func__, d.slot, (uint64_t)d.first_page,
 776                     (uint32_t)d.num_pages, ret);
 777    } else {
 778        ret = 0;
 779        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
 780    }
 781
 782    /*
 783     * After we have updated the remote dirty bitmap, we update the
 784     * cached bitmap as well for the memslot, then if another user
 785     * clears the same region we know we shouldn't clear it again on
 786     * the remote otherwise it's data loss as well.
 787     */
 788    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
 789                 size / psize);
 790    /* This handles the NULL case well */
 791    g_free(bmap_clear);
 792    return ret;
 793}
 794
 795
 796/**
 797 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 798 *
 799 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 800 * protection in the host kernel because in that case this operation
 801 * will be done within log_sync().
 802 *
 803 * @kml:     the kvm memory listener
 804 * @section: the memory range to clear dirty bitmap
 805 */
 806static int kvm_physical_log_clear(KVMMemoryListener *kml,
 807                                  MemoryRegionSection *section)
 808{
 809    KVMState *s = kvm_state;
 810    uint64_t start, size, offset, count;
 811    KVMSlot *mem;
 812    int ret = 0, i;
 813
 814    if (!s->manual_dirty_log_protect) {
 815        /* No need to do explicit clear */
 816        return ret;
 817    }
 818
 819    start = section->offset_within_address_space;
 820    size = int128_get64(section->size);
 821
 822    if (!size) {
 823        /* Nothing more we can do... */
 824        return ret;
 825    }
 826
 827    kvm_slots_lock(kml);
 828
 829    for (i = 0; i < s->nr_slots; i++) {
 830        mem = &kml->slots[i];
 831        /* Discard slots that are empty or do not overlap the section */
 832        if (!mem->memory_size ||
 833            mem->start_addr > start + size - 1 ||
 834            start > mem->start_addr + mem->memory_size - 1) {
 835            continue;
 836        }
 837
 838        if (start >= mem->start_addr) {
 839            /* The slot starts before section or is aligned to it.  */
 840            offset = start - mem->start_addr;
 841            count = MIN(mem->memory_size - offset, size);
 842        } else {
 843            /* The slot starts after section.  */
 844            offset = 0;
 845            count = MIN(mem->memory_size, size - (mem->start_addr - start));
 846        }
 847        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
 848        if (ret < 0) {
 849            break;
 850        }
 851    }
 852
 853    kvm_slots_unlock(kml);
 854
 855    return ret;
 856}
 857
 858static void kvm_coalesce_mmio_region(MemoryListener *listener,
  859                                     MemoryRegionSection *section,
 860                                     hwaddr start, hwaddr size)
 861{
 862    KVMState *s = kvm_state;
 863
 864    if (s->coalesced_mmio) {
 865        struct kvm_coalesced_mmio_zone zone;
 866
 867        zone.addr = start;
 868        zone.size = size;
 869        zone.pad = 0;
 870
 871        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
 872    }
 873}
 874
 875static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
  876                                       MemoryRegionSection *section,
 877                                       hwaddr start, hwaddr size)
 878{
 879    KVMState *s = kvm_state;
 880
 881    if (s->coalesced_mmio) {
 882        struct kvm_coalesced_mmio_zone zone;
 883
 884        zone.addr = start;
 885        zone.size = size;
 886        zone.pad = 0;
 887
 888        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 889    }
 890}
 891
 892static void kvm_coalesce_pio_add(MemoryListener *listener,
 893                                MemoryRegionSection *section,
 894                                hwaddr start, hwaddr size)
 895{
 896    KVMState *s = kvm_state;
 897
 898    if (s->coalesced_pio) {
 899        struct kvm_coalesced_mmio_zone zone;
 900
 901        zone.addr = start;
 902        zone.size = size;
 903        zone.pio = 1;
 904
 905        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
 906    }
 907}
 908
 909static void kvm_coalesce_pio_del(MemoryListener *listener,
 910                                MemoryRegionSection *section,
 911                                hwaddr start, hwaddr size)
 912{
 913    KVMState *s = kvm_state;
 914
 915    if (s->coalesced_pio) {
 916        struct kvm_coalesced_mmio_zone zone;
 917
 918        zone.addr = start;
 919        zone.size = size;
 920        zone.pio = 1;
 921
 922        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 923     }
 924}
 925
 926static MemoryListener kvm_coalesced_pio_listener = {
 927    .coalesced_io_add = kvm_coalesce_pio_add,
 928    .coalesced_io_del = kvm_coalesce_pio_del,
 929};
 930
 931int kvm_check_extension(KVMState *s, unsigned int extension)
 932{
 933    int ret;
 934
 935    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
 936    if (ret < 0) {
 937        ret = 0;
 938    }
 939
 940    return ret;
 941}
 942
 943int kvm_vm_check_extension(KVMState *s, unsigned int extension)
 944{
 945    int ret;
 946
 947    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
 948    if (ret < 0) {
 949        /* VM wide version not implemented, use global one instead */
 950        ret = kvm_check_extension(s, extension);
 951    }
 952
 953    return ret;
 954}
 955
 956typedef struct HWPoisonPage {
 957    ram_addr_t ram_addr;
 958    QLIST_ENTRY(HWPoisonPage) list;
 959} HWPoisonPage;
 960
 961static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
 962    QLIST_HEAD_INITIALIZER(hwpoison_page_list);
 963
 964static void kvm_unpoison_all(void *param)
 965{
 966    HWPoisonPage *page, *next_page;
 967
 968    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
 969        QLIST_REMOVE(page, list);
 970        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
 971        g_free(page);
 972    }
 973}
 974
 975void kvm_hwpoison_page_add(ram_addr_t ram_addr)
 976{
 977    HWPoisonPage *page;
 978
 979    QLIST_FOREACH(page, &hwpoison_page_list, list) {
 980        if (page->ram_addr == ram_addr) {
 981            return;
 982        }
 983    }
 984    page = g_new(HWPoisonPage, 1);
 985    page->ram_addr = ram_addr;
 986    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
 987}
 988
 989static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
 990{
 991#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
 992    /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
 993     * endianness, but the memory core hands them in target endianness.
 994     * For example, PPC is always treated as big-endian even if running
 995     * on KVM and on PPC64LE.  Correct here.
 996     */
 997    switch (size) {
 998    case 2:
 999        val = bswap16(val);
1000        break;
1001    case 4:
1002        val = bswap32(val);
1003        break;
1004    }
1005#endif
1006    return val;
1007}
1008
1009static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
1010                                  bool assign, uint32_t size, bool datamatch)
1011{
1012    int ret;
1013    struct kvm_ioeventfd iofd = {
1014        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1015        .addr = addr,
1016        .len = size,
1017        .flags = 0,
1018        .fd = fd,
1019    };
1020
1021    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
1022                                 datamatch);
1023    if (!kvm_enabled()) {
1024        return -ENOSYS;
1025    }
1026
1027    if (datamatch) {
1028        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1029    }
1030    if (!assign) {
1031        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1032    }
1033
1034    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
1035
1036    if (ret < 0) {
1037        return -errno;
1038    }
1039
1040    return 0;
1041}
1042
1043static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
1044                                 bool assign, uint32_t size, bool datamatch)
1045{
1046    struct kvm_ioeventfd kick = {
1047        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1048        .addr = addr,
1049        .flags = KVM_IOEVENTFD_FLAG_PIO,
1050        .len = size,
1051        .fd = fd,
1052    };
1053    int r;
1054    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
1055    if (!kvm_enabled()) {
1056        return -ENOSYS;
1057    }
1058    if (datamatch) {
1059        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1060    }
1061    if (!assign) {
1062        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1063    }
1064    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
1065    if (r < 0) {
1066        return r;
1067    }
1068    return 0;
1069}
1070
1071
1072static int kvm_check_many_ioeventfds(void)
1073{
1074    /* Userspace can use ioeventfd for io notification.  This requires a host
1075     * that supports eventfd(2) and an I/O thread; since eventfd does not
1076     * support SIGIO it cannot interrupt the vcpu.
1077     *
1078     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
1079     * can avoid creating too many ioeventfds.
1080     */
1081#if defined(CONFIG_EVENTFD)
1082    int ioeventfds[7];
1083    int i, ret = 0;
1084    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
1085        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
1086        if (ioeventfds[i] < 0) {
1087            break;
1088        }
1089        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
1090        if (ret < 0) {
1091            close(ioeventfds[i]);
1092            break;
1093        }
1094    }
1095
1096    /* Decide whether many devices are supported or not */
1097    ret = i == ARRAY_SIZE(ioeventfds);
1098
1099    while (i-- > 0) {
1100        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
1101        close(ioeventfds[i]);
1102    }
1103    return ret;
1104#else
1105    return 0;
1106#endif
1107}
1108
1109static const KVMCapabilityInfo *
1110kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
1111{
1112    while (list->name) {
1113        if (!kvm_check_extension(s, list->value)) {
1114            return list;
1115        }
1116        list++;
1117    }
1118    return NULL;
1119}
1120
1121void kvm_set_max_memslot_size(hwaddr max_slot_size)
1122{
1123    g_assert(
1124        ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
1125    );
1126    kvm_max_slot_size = max_slot_size;
1127}
1128
1129static void kvm_set_phys_mem(KVMMemoryListener *kml,
1130                             MemoryRegionSection *section, bool add)
1131{
1132    KVMSlot *mem;
1133    int err;
1134    MemoryRegion *mr = section->mr;
1135    bool writeable = !mr->readonly && !mr->rom_device;
1136    hwaddr start_addr, size, slot_size;
1137    void *ram;
1138
1139    if (!memory_region_is_ram(mr)) {
1140        if (writeable || !kvm_readonly_mem_allowed) {
1141            return;
1142        } else if (!mr->romd_mode) {
1143            /* If the memory device is not in romd_mode, then we actually want
1144             * to remove the kvm memory slot so all accesses will trap. */
1145            add = false;
1146        }
1147    }
1148
1149    size = kvm_align_section(section, &start_addr);
1150    if (!size) {
1151        return;
1152    }
1153
1154    /* use aligned delta to align the ram address */
1155    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
1156          (start_addr - section->offset_within_address_space);
1157
1158    kvm_slots_lock(kml);
1159
1160    if (!add) {
1161        do {
1162            slot_size = MIN(kvm_max_slot_size, size);
1163            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
1164            if (!mem) {
1165                goto out;
1166            }
1167            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1168                kvm_physical_sync_dirty_bitmap(kml, section);
1169            }
1170
1171            /* unregister the slot */
1172            g_free(mem->dirty_bmap);
1173            mem->dirty_bmap = NULL;
1174            mem->memory_size = 0;
1175            mem->flags = 0;
1176            err = kvm_set_user_memory_region(kml, mem, false);
1177            if (err) {
1178                fprintf(stderr, "%s: error unregistering slot: %s\n",
1179                        __func__, strerror(-err));
1180                abort();
1181            }
1182            start_addr += slot_size;
1183            size -= slot_size;
1184        } while (size);
1185        goto out;
1186    }
1187
1188    /* register the new slot */
1189    do {
1190        slot_size = MIN(kvm_max_slot_size, size);
1191        mem = kvm_alloc_slot(kml);
1192        mem->memory_size = slot_size;
1193        mem->start_addr = start_addr;
1194        mem->ram = ram;
1195        mem->flags = kvm_mem_flags(mr);
1196
1197        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1198            /*
1199             * Reallocate the bmap; it means it doesn't disappear in
1200             * middle of a migrate.
1201             */
1202            kvm_memslot_init_dirty_bitmap(mem);
1203        }
1204        err = kvm_set_user_memory_region(kml, mem, true);
1205        if (err) {
1206            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
1207                    strerror(-err));
1208            abort();
1209        }
1210        start_addr += slot_size;
1211        ram += slot_size;
1212        size -= slot_size;
1213    } while (size);
1214
1215out:
1216    kvm_slots_unlock(kml);
1217}
1218
1219static void kvm_region_add(MemoryListener *listener,
1220                           MemoryRegionSection *section)
1221{
1222    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1223
1224    memory_region_ref(section->mr);
1225    kvm_set_phys_mem(kml, section, true);
1226}
1227
1228static void kvm_region_del(MemoryListener *listener,
1229                           MemoryRegionSection *section)
1230{
1231    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1232
1233    kvm_set_phys_mem(kml, section, false);
1234    memory_region_unref(section->mr);
1235}
1236
1237static void kvm_log_sync(MemoryListener *listener,
1238                         MemoryRegionSection *section)
1239{
1240    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1241    int r;
1242
1243    kvm_slots_lock(kml);
1244    r = kvm_physical_sync_dirty_bitmap(kml, section);
1245    kvm_slots_unlock(kml);
1246    if (r < 0) {
1247        abort();
1248    }
1249}
1250
1251static void kvm_log_clear(MemoryListener *listener,
1252                          MemoryRegionSection *section)
1253{
1254    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1255    int r;
1256
1257    r = kvm_physical_log_clear(kml, section);
1258    if (r < 0) {
1259        error_report_once("%s: kvm log clear failed: mr=%s "
1260                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1261                          section->mr->name, section->offset_within_region,
1262                          int128_get64(section->size));
1263        abort();
1264    }
1265}
1266
1267static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1268                                  MemoryRegionSection *section,
1269                                  bool match_data, uint64_t data,
1270                                  EventNotifier *e)
1271{
1272    int fd = event_notifier_get_fd(e);
1273    int r;
1274
1275    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1276                               data, true, int128_get64(section->size),
1277                               match_data);
1278    if (r < 0) {
1279        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1280                __func__, strerror(-r), -r);
1281        abort();
1282    }
1283}
1284
1285static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1286                                  MemoryRegionSection *section,
1287                                  bool match_data, uint64_t data,
1288                                  EventNotifier *e)
1289{
1290    int fd = event_notifier_get_fd(e);
1291    int r;
1292
1293    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1294                               data, false, int128_get64(section->size),
1295                               match_data);
1296    if (r < 0) {
1297        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1298                __func__, strerror(-r), -r);
1299        abort();
1300    }
1301}
1302
1303static void kvm_io_ioeventfd_add(MemoryListener *listener,
1304                                 MemoryRegionSection *section,
1305                                 bool match_data, uint64_t data,
1306                                 EventNotifier *e)
1307{
1308    int fd = event_notifier_get_fd(e);
1309    int r;
1310
1311    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1312                              data, true, int128_get64(section->size),
1313                              match_data);
1314    if (r < 0) {
1315        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1316                __func__, strerror(-r), -r);
1317        abort();
1318    }
1319}
1320
1321static void kvm_io_ioeventfd_del(MemoryListener *listener,
1322                                 MemoryRegionSection *section,
1323                                 bool match_data, uint64_t data,
1324                                 EventNotifier *e)
1325
1326{
1327    int fd = event_notifier_get_fd(e);
1328    int r;
1329
1330    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1331                              data, false, int128_get64(section->size),
1332                              match_data);
1333    if (r < 0) {
1334        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1335                __func__, strerror(-r), -r);
1336        abort();
1337    }
1338}
1339
1340void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1341                                  AddressSpace *as, int as_id)
1342{
1343    int i;
1344
1345    qemu_mutex_init(&kml->slots_lock);
1346    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
1347    kml->as_id = as_id;
1348
1349    for (i = 0; i < s->nr_slots; i++) {
1350        kml->slots[i].slot = i;
1351    }
1352
1353    kml->listener.region_add = kvm_region_add;
1354    kml->listener.region_del = kvm_region_del;
1355    kml->listener.log_start = kvm_log_start;
1356    kml->listener.log_stop = kvm_log_stop;
1357    kml->listener.log_sync = kvm_log_sync;
1358    kml->listener.log_clear = kvm_log_clear;
1359    kml->listener.priority = 10;
1360
1361    memory_listener_register(&kml->listener, as);
1362
1363    for (i = 0; i < s->nr_as; ++i) {
1364        if (!s->as[i].as) {
1365            s->as[i].as = as;
1366            s->as[i].ml = kml;
1367            break;
1368        }
1369    }
1370}
1371
1372static MemoryListener kvm_io_listener = {
1373    .eventfd_add = kvm_io_ioeventfd_add,
1374    .eventfd_del = kvm_io_ioeventfd_del,
1375    .priority = 10,
1376};
1377
1378int kvm_set_irq(KVMState *s, int irq, int level)
1379{
1380    struct kvm_irq_level event;
1381    int ret;
1382
1383    assert(kvm_async_interrupts_enabled());
1384
1385    event.level = level;
1386    event.irq = irq;
1387    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1388    if (ret < 0) {
1389        perror("kvm_set_irq");
1390        abort();
1391    }
1392
1393    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1394}
1395
1396#ifdef KVM_CAP_IRQ_ROUTING
1397typedef struct KVMMSIRoute {
1398    struct kvm_irq_routing_entry kroute;
1399    QTAILQ_ENTRY(KVMMSIRoute) entry;
1400} KVMMSIRoute;
1401
1402static void set_gsi(KVMState *s, unsigned int gsi)
1403{
1404    set_bit(gsi, s->used_gsi_bitmap);
1405}
1406
1407static void clear_gsi(KVMState *s, unsigned int gsi)
1408{
1409    clear_bit(gsi, s->used_gsi_bitmap);
1410}
1411
1412void kvm_init_irq_routing(KVMState *s)
1413{
1414    int gsi_count, i;
1415
1416    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1417    if (gsi_count > 0) {
1418        /* Round up so we can search ints using ffs */
1419        s->used_gsi_bitmap = bitmap_new(gsi_count);
1420        s->gsi_count = gsi_count;
1421    }
1422
1423    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1424    s->nr_allocated_irq_routes = 0;
1425
1426    if (!kvm_direct_msi_allowed) {
1427        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
1428            QTAILQ_INIT(&s->msi_hashtab[i]);
1429        }
1430    }
1431
1432    kvm_arch_init_irq_routing(s);
1433}
1434
1435void kvm_irqchip_commit_routes(KVMState *s)
1436{
1437    int ret;
1438
1439    if (kvm_gsi_direct_mapping()) {
1440        return;
1441    }
1442
1443    if (!kvm_gsi_routing_enabled()) {
1444        return;
1445    }
1446
1447    s->irq_routes->flags = 0;
1448    trace_kvm_irqchip_commit_routes();
1449    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1450    assert(ret == 0);
1451}
1452
1453static void kvm_add_routing_entry(KVMState *s,
1454                                  struct kvm_irq_routing_entry *entry)
1455{
1456    struct kvm_irq_routing_entry *new;
1457    int n, size;
1458
1459    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1460        n = s->nr_allocated_irq_routes * 2;
1461        if (n < 64) {
1462            n = 64;
1463        }
1464        size = sizeof(struct kvm_irq_routing);
1465        size += n * sizeof(*new);
1466        s->irq_routes = g_realloc(s->irq_routes, size);
1467        s->nr_allocated_irq_routes = n;
1468    }
1469    n = s->irq_routes->nr++;
1470    new = &s->irq_routes->entries[n];
1471
1472    *new = *entry;
1473
1474    set_gsi(s, entry->gsi);
1475}
1476
1477static int kvm_update_routing_entry(KVMState *s,
1478                                    struct kvm_irq_routing_entry *new_entry)
1479{
1480    struct kvm_irq_routing_entry *entry;
1481    int n;
1482
1483    for (n = 0; n < s->irq_routes->nr; n++) {
1484        entry = &s->irq_routes->entries[n];
1485        if (entry->gsi != new_entry->gsi) {
1486            continue;
1487        }
1488
 1489        if (!memcmp(entry, new_entry, sizeof *entry)) {
1490            return 0;
1491        }
1492
1493        *entry = *new_entry;
1494
1495        return 0;
1496    }
1497
1498    return -ESRCH;
1499}
1500
1501void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1502{
1503    struct kvm_irq_routing_entry e = {};
1504
1505    assert(pin < s->gsi_count);
1506
1507    e.gsi = irq;
1508    e.type = KVM_IRQ_ROUTING_IRQCHIP;
1509    e.flags = 0;
1510    e.u.irqchip.irqchip = irqchip;
1511    e.u.irqchip.pin = pin;
1512    kvm_add_routing_entry(s, &e);
1513}
1514
1515void kvm_irqchip_release_virq(KVMState *s, int virq)
1516{
1517    struct kvm_irq_routing_entry *e;
1518    int i;
1519
1520    if (kvm_gsi_direct_mapping()) {
1521        return;
1522    }
1523
1524    for (i = 0; i < s->irq_routes->nr; i++) {
1525        e = &s->irq_routes->entries[i];
1526        if (e->gsi == virq) {
1527            s->irq_routes->nr--;
1528            *e = s->irq_routes->entries[s->irq_routes->nr];
1529        }
1530    }
1531    clear_gsi(s, virq);
1532    kvm_arch_release_virq_post(virq);
1533    trace_kvm_irqchip_release_virq(virq);
1534}
1535
1536void kvm_irqchip_add_change_notifier(Notifier *n)
1537{
1538    notifier_list_add(&kvm_irqchip_change_notifiers, n);
1539}
1540
1541void kvm_irqchip_remove_change_notifier(Notifier *n)
1542{
1543    notifier_remove(n);
1544}
1545
1546void kvm_irqchip_change_notify(void)
1547{
1548    notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
1549}
1550
1551static unsigned int kvm_hash_msi(uint32_t data)
1552{
1553    /* This is optimized for IA32 MSI layout. However, no other arch shall
1554     * repeat the mistake of not providing a direct MSI injection API. */
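         /* For example (added note): on x86 the low byte of the MSI data
          * field is the interrupt vector, so a message with data 0x4041
          * hashes to 0x41. */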
1555    return data & 0xff;
1556}
1557
1558static void kvm_flush_dynamic_msi_routes(KVMState *s)
1559{
1560    KVMMSIRoute *route, *next;
1561    unsigned int hash;
1562
1563    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1564        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1565            kvm_irqchip_release_virq(s, route->kroute.gsi);
1566            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1567            g_free(route);
1568        }
1569    }
1570}
1571
1572static int kvm_irqchip_get_virq(KVMState *s)
1573{
1574    int next_virq;
1575
1576    /*
 1577     * PIC and IOAPIC share the first 16 GSI numbers, so there are more
 1578     * available GSI numbers than IRQ route entries. Allocating a GSI
1579     * number can succeed even though a new route entry cannot be added.
1580     * When this happens, flush dynamic MSI entries to free IRQ route entries.
1581     */
1582    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
1583        kvm_flush_dynamic_msi_routes(s);
1584    }
1585
1586    /* Return the lowest unused GSI in the bitmap */
1587    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
1588    if (next_virq >= s->gsi_count) {
1589        return -ENOSPC;
1590    } else {
1591        return next_virq;
1592    }
1593}
1594
1595static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1596{
1597    unsigned int hash = kvm_hash_msi(msg.data);
1598    KVMMSIRoute *route;
1599
1600    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1601        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1602            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1603            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1604            return route;
1605        }
1606    }
1607    return NULL;
1608}
1609
1610int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1611{
1612    struct kvm_msi msi;
1613    KVMMSIRoute *route;
1614
1615    if (kvm_direct_msi_allowed) {
1616        msi.address_lo = (uint32_t)msg.address;
1617        msi.address_hi = msg.address >> 32;
1618        msi.data = le32_to_cpu(msg.data);
1619        msi.flags = 0;
1620        memset(msi.pad, 0, sizeof(msi.pad));
1621
1622        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
1623    }
1624
1625    route = kvm_lookup_msi_route(s, msg);
1626    if (!route) {
1627        int virq;
1628
1629        virq = kvm_irqchip_get_virq(s);
1630        if (virq < 0) {
1631            return virq;
1632        }
1633
1634        route = g_malloc0(sizeof(KVMMSIRoute));
1635        route->kroute.gsi = virq;
1636        route->kroute.type = KVM_IRQ_ROUTING_MSI;
1637        route->kroute.flags = 0;
1638        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1639        route->kroute.u.msi.address_hi = msg.address >> 32;
1640        route->kroute.u.msi.data = le32_to_cpu(msg.data);
1641
1642        kvm_add_routing_entry(s, &route->kroute);
1643        kvm_irqchip_commit_routes(s);
1644
1645        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1646                           entry);
1647    }
1648
1649    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1650
1651    return kvm_set_irq(s, route->kroute.gsi, 1);
1652}
1653
1654int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1655{
1656    struct kvm_irq_routing_entry kroute = {};
1657    int virq;
1658    MSIMessage msg = {0, 0};
1659
1660    if (pci_available && dev) {
1661        msg = pci_get_msi_message(dev, vector);
1662    }
1663
1664    if (kvm_gsi_direct_mapping()) {
1665        return kvm_arch_msi_data_to_gsi(msg.data);
1666    }
1667
1668    if (!kvm_gsi_routing_enabled()) {
1669        return -ENOSYS;
1670    }
1671
1672    virq = kvm_irqchip_get_virq(s);
1673    if (virq < 0) {
1674        return virq;
1675    }
1676
1677    kroute.gsi = virq;
1678    kroute.type = KVM_IRQ_ROUTING_MSI;
1679    kroute.flags = 0;
1680    kroute.u.msi.address_lo = (uint32_t)msg.address;
1681    kroute.u.msi.address_hi = msg.address >> 32;
1682    kroute.u.msi.data = le32_to_cpu(msg.data);
1683    if (pci_available && kvm_msi_devid_required()) {
1684        kroute.flags = KVM_MSI_VALID_DEVID;
1685        kroute.u.msi.devid = pci_requester_id(dev);
1686    }
1687    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1688        kvm_irqchip_release_virq(s, virq);
1689        return -EINVAL;
1690    }
1691
1692    trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
1693                                    vector, virq);
1694
1695    kvm_add_routing_entry(s, &kroute);
1696    kvm_arch_add_msi_route_post(&kroute, vector, dev);
1697    kvm_irqchip_commit_routes(s);
1698
1699    return virq;
1700}
1701
1702int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
1703                                 PCIDevice *dev)
1704{
1705    struct kvm_irq_routing_entry kroute = {};
1706
1707    if (kvm_gsi_direct_mapping()) {
1708        return 0;
1709    }
1710
1711    if (!kvm_irqchip_in_kernel()) {
1712        return -ENOSYS;
1713    }
1714
1715    kroute.gsi = virq;
1716    kroute.type = KVM_IRQ_ROUTING_MSI;
1717    kroute.flags = 0;
1718    kroute.u.msi.address_lo = (uint32_t)msg.address;
1719    kroute.u.msi.address_hi = msg.address >> 32;
1720    kroute.u.msi.data = le32_to_cpu(msg.data);
1721    if (pci_available && kvm_msi_devid_required()) {
1722        kroute.flags = KVM_MSI_VALID_DEVID;
1723        kroute.u.msi.devid = pci_requester_id(dev);
1724    }
1725    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1726        return -EINVAL;
1727    }
1728
1729    trace_kvm_irqchip_update_msi_route(virq);
1730
1731    return kvm_update_routing_entry(s, &kroute);
1732}
1733
1734static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
1735                                    EventNotifier *resample, int virq,
1736                                    bool assign)
1737{
1738    int fd = event_notifier_get_fd(event);
1739    int rfd = resample ? event_notifier_get_fd(resample) : -1;
1740
1741    struct kvm_irqfd irqfd = {
1742        .fd = fd,
1743        .gsi = virq,
1744        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
1745    };
1746
1747    if (rfd != -1) {
1748        assert(assign);
1749        if (kvm_irqchip_is_split()) {
1750            /*
1751             * When the slow irqchip (e.g. IOAPIC) is in the
1752             * userspace, KVM kernel resamplefd will not work because
1753             * the EOI of the interrupt will be delivered to userspace
1754             * instead, so the KVM kernel resamplefd kick will be
1755             * skipped.  The userspace here mimics what the kernel
1756             * provides with resamplefd, remember the resamplefd and
1757             * kick it when we receive EOI of this IRQ.
1758             *
1759             * This is hackery because IOAPIC is mostly bypassed
1760             * (except EOI broadcasts) when irqfd is used.  However
1761             * this can bring much performance back for split irqchip
1762             * with INTx IRQs (for VFIO, this gives 93% perf of the
 1763             * full fast path, which is a 46% perf boost compared to
1764             * the INTx slow path).
1765             */
1766            kvm_resample_fd_insert(virq, resample);
1767        } else {
1768            irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
1769            irqfd.resamplefd = rfd;
1770        }
1771    } else if (!assign) {
1772        if (kvm_irqchip_is_split()) {
1773            kvm_resample_fd_remove(virq);
1774        }
1775    }
1776
1777    if (!kvm_irqfds_enabled()) {
1778        return -ENOSYS;
1779    }
1780
1781    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
1782}
1783
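/* Allocate a virq and route it to an s390 I/O adapter interrupt source. */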
1784int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1785{
1786    struct kvm_irq_routing_entry kroute = {};
1787    int virq;
1788
1789    if (!kvm_gsi_routing_enabled()) {
1790        return -ENOSYS;
1791    }
1792
1793    virq = kvm_irqchip_get_virq(s);
1794    if (virq < 0) {
1795        return virq;
1796    }
1797
1798    kroute.gsi = virq;
1799    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
1800    kroute.flags = 0;
1801    kroute.u.adapter.summary_addr = adapter->summary_addr;
1802    kroute.u.adapter.ind_addr = adapter->ind_addr;
1803    kroute.u.adapter.summary_offset = adapter->summary_offset;
1804    kroute.u.adapter.ind_offset = adapter->ind_offset;
1805    kroute.u.adapter.adapter_id = adapter->adapter_id;
1806
1807    kvm_add_routing_entry(s, &kroute);
1808
1809    return virq;
1810}
1811
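/* Allocate a virq and route it to a Hyper-V SynIC SINT of the given vcpu. */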
1812int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1813{
1814    struct kvm_irq_routing_entry kroute = {};
1815    int virq;
1816
1817    if (!kvm_gsi_routing_enabled()) {
1818        return -ENOSYS;
1819    }
1820    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
1821        return -ENOSYS;
1822    }
1823    virq = kvm_irqchip_get_virq(s);
1824    if (virq < 0) {
1825        return virq;
1826    }
1827
1828    kroute.gsi = virq;
1829    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
1830    kroute.flags = 0;
1831    kroute.u.hv_sint.vcpu = vcpu;
1832    kroute.u.hv_sint.sint = sint;
1833
1834    kvm_add_routing_entry(s, &kroute);
1835    kvm_irqchip_commit_routes(s);
1836
1837    return virq;
1838}
1839
1840#else /* !KVM_CAP_IRQ_ROUTING */
1841
1842void kvm_init_irq_routing(KVMState *s)
1843{
1844}
1845
1846void kvm_irqchip_release_virq(KVMState *s, int virq)
1847{
1848}
1849
1850int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1851{
1852    abort();
1853}
1854
1855int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1856{
1857    return -ENOSYS;
1858}
1859
1860int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1861{
1862    return -ENOSYS;
1863}
1864
1865int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1866{
1867    return -ENOSYS;
1868}
1869
1870static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
1871                                    EventNotifier *resample, int virq,
1872                                    bool assign)
1873{
1874    abort();
1875}
1876
1877int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, PCIDevice *dev)
1878{
1879    return -ENOSYS;
1880}
1881#endif /* !KVM_CAP_IRQ_ROUTING */
1882
1883int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1884                                       EventNotifier *rn, int virq)
1885{
1886    return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
1887}
1888
1889int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1890                                          int virq)
1891{
1892    return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
1893}
1894
1895int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
1896                                   EventNotifier *rn, qemu_irq irq)
1897{
1898    gpointer key, gsi;
1899    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1900
1901    if (!found) {
1902        return -ENXIO;
1903    }
1904    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
1905}
1906
1907int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
1908                                      qemu_irq irq)
1909{
1910    gpointer key, gsi;
1911    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1912
1913    if (!found) {
1914        return -ENXIO;
1915    }
1916    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
1917}
1918
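/*
 * Record the qemu_irq -> GSI mapping used by the irqfd notifier helpers
 * above to translate a qemu_irq into a GSI number.
 */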
1919void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
1920{
1921    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
1922}
1923
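/*
 * Create the in-kernel irqchip if the host supports one, then set up GSI
 * routing and the qemu_irq -> GSI hash table.
 */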
1924static void kvm_irqchip_create(KVMState *s)
1925{
1926    int ret;
1927
1928    assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
1929    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
1930        ;
1931    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
1932        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
1933        if (ret < 0) {
1934            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
1935            exit(1);
1936        }
1937    } else {
1938        return;
1939    }
1940
1941    /* First probe and see if there's an arch-specific hook to create the
1942     * in-kernel irqchip for us */
1943    ret = kvm_arch_irqchip_create(s);
1944    if (ret == 0) {
1945        if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
1946            perror("Split IRQ chip mode not supported.");
1947            exit(1);
1948        } else {
1949            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
1950        }
1951    }
1952    if (ret < 0) {
1953        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
1954        exit(1);
1955    }
1956
1957    kvm_kernel_irqchip = true;
1958    /* If we have an in-kernel IRQ chip then we must have asynchronous
1959     * interrupt delivery (though the reverse is not necessarily true)
1960     */
1961    kvm_async_interrupts_allowed = true;
1962    kvm_halt_in_kernel_allowed = true;
1963
1964    kvm_init_irq_routing(s);
1965
1966    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
1967}
1968
1969/* Find number of supported CPUs using the recommended
1970 * procedure from the kernel API documentation to cope with
1971 * older kernels that may be missing capabilities.
1972 */
1973static int kvm_recommended_vcpus(KVMState *s)
1974{
1975    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
1976    return (ret) ? ret : 4;
1977}
1978
1979static int kvm_max_vcpus(KVMState *s)
1980{
1981    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
1982    return (ret) ? ret : kvm_recommended_vcpus(s);
1983}
1984
1985static int kvm_max_vcpu_id(KVMState *s)
1986{
1987    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
1988    return (ret) ? ret : kvm_max_vcpus(s);
1989}
1990
1991bool kvm_vcpu_id_is_valid(int vcpu_id)
1992{
1993    KVMState *s = KVM_STATE(current_accel());
1994    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
1995}
1996
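/*
 * Accelerator init: open /dev/kvm, create the VM, probe capabilities and
 * vcpu limits, initialize the in-kernel irqchip and the memory listeners,
 * and finally register the KVM cpus accel interface.
 */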
1997static int kvm_init(MachineState *ms)
1998{
1999    MachineClass *mc = MACHINE_GET_CLASS(ms);
2000    static const char upgrade_note[] =
2001        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2002        "(see http://sourceforge.net/projects/kvm).\n";
2003    struct {
2004        const char *name;
2005        int num;
2006    } num_cpus[] = {
2007        { "SMP",          ms->smp.cpus },
2008        { "hotpluggable", ms->smp.max_cpus },
2009        { NULL, }
2010    }, *nc = num_cpus;
2011    int soft_vcpus_limit, hard_vcpus_limit;
2012    KVMState *s;
2013    const KVMCapabilityInfo *missing_cap;
2014    int ret;
2015    int type = 0;
2016    const char *kvm_type;
2017    uint64_t dirty_log_manual_caps;
2018
2019    s = KVM_STATE(ms->accelerator);
2020
2021    /*
2022     * On systems where the kernel can support different base page
2023     * sizes, host page size may be different from TARGET_PAGE_SIZE,
2024     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
2025     * page size for the system though.
2026     */
2027    assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size);
2028
2029    s->sigmask_len = 8;
2030
2031#ifdef KVM_CAP_SET_GUEST_DEBUG
2032    QTAILQ_INIT(&s->kvm_sw_breakpoints);
2033#endif
2034    QLIST_INIT(&s->kvm_parked_vcpus);
2035    s->vmfd = -1;
2036    s->fd = qemu_open_old("/dev/kvm", O_RDWR);
2037    if (s->fd == -1) {
2038        fprintf(stderr, "Could not access KVM kernel module: %m\n");
2039        ret = -errno;
2040        goto err;
2041    }
2042
2043    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
2044    if (ret < KVM_API_VERSION) {
2045        if (ret >= 0) {
2046            ret = -EINVAL;
2047        }
2048        fprintf(stderr, "kvm version too old\n");
2049        goto err;
2050    }
2051
2052    if (ret > KVM_API_VERSION) {
2053        ret = -EINVAL;
2054        fprintf(stderr, "kvm version not supported\n");
2055        goto err;
2056    }
2057
2058    kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
2059    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2060
2061    /* If unspecified, use the default value */
2062    if (!s->nr_slots) {
2063        s->nr_slots = 32;
2064    }
2065
2066    s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
2067    if (s->nr_as <= 1) {
2068        s->nr_as = 1;
2069    }
2070    s->as = g_new0(struct KVMAs, s->nr_as);
2071
2072    kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
2073    if (mc->kvm_type) {
2074        type = mc->kvm_type(ms, kvm_type);
2075    } else if (kvm_type) {
2076        ret = -EINVAL;
2077        fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
2078        goto err;
2079    }
2080
2081    do {
2082        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
2083    } while (ret == -EINTR);
2084
2085    if (ret < 0) {
2086        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
2087                strerror(-ret));
2088
2089#ifdef TARGET_S390X
2090        if (ret == -EINVAL) {
2091            fprintf(stderr,
2092                    "Host kernel setup problem detected. Please verify:\n");
2093            fprintf(stderr, "- for kernels supporting the switch_amode or"
2094                    " user_mode parameters, whether\n");
2095            fprintf(stderr,
2096                    "  user space is running in primary address space\n");
2097            fprintf(stderr,
2098                    "- for kernels supporting the vm.allocate_pgste sysctl, "
2099                    "whether it is enabled\n");
2100        }
2101#endif
2102        goto err;
2103    }
2104
2105    s->vmfd = ret;
2106
2107    /* check the vcpu limits */
2108    soft_vcpus_limit = kvm_recommended_vcpus(s);
2109    hard_vcpus_limit = kvm_max_vcpus(s);
2110
2111    while (nc->name) {
2112        if (nc->num > soft_vcpus_limit) {
2113            warn_report("Number of %s cpus requested (%d) exceeds "
2114                        "the recommended cpus supported by KVM (%d)",
2115                        nc->name, nc->num, soft_vcpus_limit);
2116
2117            if (nc->num > hard_vcpus_limit) {
2118                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
2119                        "the maximum cpus supported by KVM (%d)\n",
2120                        nc->name, nc->num, hard_vcpus_limit);
2121                exit(1);
2122            }
2123        }
2124        nc++;
2125    }
2126
2127    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
2128    if (!missing_cap) {
2129        missing_cap =
2130            kvm_check_extension_list(s, kvm_arch_required_capabilities);
2131    }
2132    if (missing_cap) {
2133        ret = -EINVAL;
2134        fprintf(stderr, "kvm does not support %s\n%s",
2135                missing_cap->name, upgrade_note);
2136        goto err;
2137    }
2138
2139    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
2140    s->coalesced_pio = s->coalesced_mmio &&
2141                       kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
2142
2143    dirty_log_manual_caps =
2144        kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
2145    dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
2146                              KVM_DIRTY_LOG_INITIALLY_SET);
2147    s->manual_dirty_log_protect = dirty_log_manual_caps;
2148    if (dirty_log_manual_caps) {
2149        ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
2150                                   dirty_log_manual_caps);
2151        if (ret) {
2152            warn_report("Failed to enable capability %"PRIu64" of "
2153                        "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. "
2154                        "Falling back to the legacy mode.",
2155                        dirty_log_manual_caps);
2156            s->manual_dirty_log_protect = 0;
2157        }
2158    }
2159
2160#ifdef KVM_CAP_VCPU_EVENTS
2161    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2162#endif
2163
2164    s->robust_singlestep =
2165        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
2166
2167#ifdef KVM_CAP_DEBUGREGS
2168    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
2169#endif
2170
2171    s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2172
2173#ifdef KVM_CAP_IRQ_ROUTING
2174    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
2175#endif
2176
2177    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
2178
2179    s->irq_set_ioctl = KVM_IRQ_LINE;
2180    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2181        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2182    }
2183
2184    kvm_readonly_mem_allowed =
2185        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2186
2187    kvm_eventfds_allowed =
2188        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
2189
2190    kvm_irqfds_allowed =
2191        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
2192
2193    kvm_resamplefds_allowed =
2194        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2195
2196    kvm_vm_attributes_allowed =
2197        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2198
2199    kvm_ioeventfd_any_length_allowed =
2200        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
2201
2202    kvm_state = s;
2203
2204    /*
2205     * If a memory encryption object is specified, initialize the memory
2206     * encryption context.
2207     */
2208    if (ms->memory_encryption) {
2209        kvm_state->memcrypt_handle = sev_guest_init(ms->memory_encryption);
2210        if (!kvm_state->memcrypt_handle) {
2211            ret = -1;
2212            goto err;
2213        }
2214
2215        kvm_state->memcrypt_encrypt_data = sev_encrypt_data;
2216    }
2217
2218    ret = kvm_arch_init(ms, s);
2219    if (ret < 0) {
2220        goto err;
2221    }
2222
2223    if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
2224        s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2225    }
2226
2227    qemu_register_reset(kvm_unpoison_all, NULL);
2228
2229    if (s->kernel_irqchip_allowed) {
2230        kvm_irqchip_create(s);
2231    }
2232
2233    if (kvm_eventfds_allowed) {
2234        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2235        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2236    }
2237    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2238    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2239
2240    kvm_memory_listener_register(s, &s->memory_listener,
2241                                 &address_space_memory, 0);
2242    if (kvm_eventfds_allowed) {
2243        memory_listener_register(&kvm_io_listener,
2244                                 &address_space_io);
2245    }
2246    memory_listener_register(&kvm_coalesced_pio_listener,
2247                             &address_space_io);
2248
2249    s->many_ioeventfds = kvm_check_many_ioeventfds();
2250
2251    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2252    if (!s->sync_mmu) {
2253        ret = ram_block_discard_disable(true);
2254        assert(!ret);
2255    }
2256
2257    cpus_register_accel(&kvm_cpus);
2258    return 0;
2259
2260err:
2261    assert(ret < 0);
2262    if (s->vmfd >= 0) {
2263        close(s->vmfd);
2264    }
2265    if (s->fd != -1) {
2266        close(s->fd);
2267    }
2268    g_free(s->memory_listener.slots);
2269
2270    return ret;
2271}
2272
2273void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2274{
2275    s->sigmask_len = sigmask_len;
2276}
2277
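/*
 * Complete a KVM_EXIT_IO exit: perform the requested number of accesses of
 * the given size on the I/O port, reading from or writing to the data area
 * that KVM placed in the shared kvm_run structure.
 */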
2278static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2279                          int size, uint32_t count)
2280{
2281    int i;
2282    uint8_t *ptr = data;
2283
2284    for (i = 0; i < count; i++) {
2285        address_space_rw(&address_space_io, port, attrs,
2286                         ptr, size,
2287                         direction == KVM_EXIT_IO_OUT);
2288        ptr += size;
2289    }
2290}
2291
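/*
 * Report a KVM_EXIT_INTERNAL_ERROR.  Returns EXCP_INTERRUPT if the vcpu may
 * simply be resumed (an emulation failure that the arch code does not stop
 * on), otherwise -1 so the caller stops the VM.
 */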
2292static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2293{
2294    fprintf(stderr, "KVM internal error. Suberror: %d\n",
2295            run->internal.suberror);
2296
2297    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
2298        int i;
2299
2300        for (i = 0; i < run->internal.ndata; ++i) {
2301            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
2302                    i, (uint64_t)run->internal.data[i]);
2303        }
2304    }
2305    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2306        fprintf(stderr, "emulation failure\n");
2307        if (!kvm_arch_stop_on_emulation_error(cpu)) {
2308            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2309            return EXCP_INTERRUPT;
2310        }
2311    }
2312    /* FIXME: Should trigger a QMP event to let management know
2313     * something went wrong.
2314     */
2315    return -1;
2316}
2317
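/*
 * Drain the coalesced MMIO/PIO ring shared with the kernel, replaying each
 * buffered access into the corresponding address space.
 */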
2318void kvm_flush_coalesced_mmio_buffer(void)
2319{
2320    KVMState *s = kvm_state;
2321
2322    if (s->coalesced_flush_in_progress) {
2323        return;
2324    }
2325
2326    s->coalesced_flush_in_progress = true;
2327
2328    if (s->coalesced_mmio_ring) {
2329        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2330        while (ring->first != ring->last) {
2331            struct kvm_coalesced_mmio *ent;
2332
2333            ent = &ring->coalesced_mmio[ring->first];
2334
2335            if (ent->pio == 1) {
2336                address_space_write(&address_space_io, ent->phys_addr,
2337                                    MEMTXATTRS_UNSPECIFIED, ent->data,
2338                                    ent->len);
2339            } else {
2340                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2341            }
2342            smp_wmb();
2343            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2344        }
2345    }
2346
2347    s->coalesced_flush_in_progress = false;
2348}
2349
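/*
 * Register state synchronization between QEMU and KVM.  cpu->vcpu_dirty set
 * means QEMU's copy of the registers is authoritative and must be written
 * back with kvm_arch_put_registers() before the next KVM_RUN.
 */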
2350static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2351{
2352    if (!cpu->vcpu_dirty) {
2353        kvm_arch_get_registers(cpu);
2354        cpu->vcpu_dirty = true;
2355    }
2356}
2357
2358void kvm_cpu_synchronize_state(CPUState *cpu)
2359{
2360    if (!cpu->vcpu_dirty) {
2361        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2362    }
2363}
2364
2365static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2366{
2367    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
2368    cpu->vcpu_dirty = false;
2369}
2370
2371void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2372{
2373    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2374}
2375
2376static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2377{
2378    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
2379    cpu->vcpu_dirty = false;
2380}
2381
2382void kvm_cpu_synchronize_post_init(CPUState *cpu)
2383{
2384    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2385}
2386
2387static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2388{
2389    cpu->vcpu_dirty = true;
2390}
2391
2392void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2393{
2394    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2395}
2396
2397#ifdef KVM_HAVE_MCE_INJECTION
2398static __thread void *pending_sigbus_addr;
2399static __thread int pending_sigbus_code;
2400static __thread bool have_sigbus_pending;
2401#endif
2402
2403static void kvm_cpu_kick(CPUState *cpu)
2404{
2405    qatomic_set(&cpu->kvm_run->immediate_exit, 1);
2406}
2407
2408static void kvm_cpu_kick_self(void)
2409{
2410    if (kvm_immediate_exit) {
2411        kvm_cpu_kick(current_cpu);
2412    } else {
2413        qemu_cpu_kick_self();
2414    }
2415}
2416
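/*
 * Consume any pending SIG_IPI so that a kick does not stay pending across
 * KVM_RUN.  With KVM_CAP_IMMEDIATE_EXIT no signal is involved and clearing
 * immediate_exit is sufficient.
 */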
2417static void kvm_eat_signals(CPUState *cpu)
2418{
2419    struct timespec ts = { 0, 0 };
2420    siginfo_t siginfo;
2421    sigset_t waitset;
2422    sigset_t chkset;
2423    int r;
2424
2425    if (kvm_immediate_exit) {
2426        qatomic_set(&cpu->kvm_run->immediate_exit, 0);
2427        /* Write kvm_run->immediate_exit before the cpu->exit_request
2428         * write in kvm_cpu_exec.
2429         */
2430        smp_wmb();
2431        return;
2432    }
2433
2434    sigemptyset(&waitset);
2435    sigaddset(&waitset, SIG_IPI);
2436
2437    do {
2438        r = sigtimedwait(&waitset, &siginfo, &ts);
2439        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2440            perror("sigtimedwait");
2441            exit(1);
2442        }
2443
2444        r = sigpending(&chkset);
2445        if (r == -1) {
2446            perror("sigpending");
2447            exit(1);
2448        }
2449    } while (sigismember(&chkset, SIG_IPI));
2450}
2451
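/*
 * Main vcpu loop: enter KVM_RUN and service exits (I/O, MMIO, system events,
 * ...) until one of them requires returning to the main loop.  Fatal errors
 * dump the CPU state and stop the VM.
 */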
2452int kvm_cpu_exec(CPUState *cpu)
2453{
2454    struct kvm_run *run = cpu->kvm_run;
2455    int ret, run_ret;
2456
2457    DPRINTF("kvm_cpu_exec()\n");
2458
2459    if (kvm_arch_process_async_events(cpu)) {
2460        qatomic_set(&cpu->exit_request, 0);
2461        return EXCP_HLT;
2462    }
2463
2464    qemu_mutex_unlock_iothread();
2465    cpu_exec_start(cpu);
2466
2467    do {
2468        MemTxAttrs attrs;
2469
2470        if (cpu->vcpu_dirty) {
2471            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
2472            cpu->vcpu_dirty = false;
2473        }
2474
2475        kvm_arch_pre_run(cpu, run);
2476        if (qatomic_read(&cpu->exit_request)) {
2477            DPRINTF("interrupt exit requested\n");
2478            /*
2479             * KVM requires us to reenter the kernel after IO exits to complete
2480             * instruction emulation. This self-signal will ensure that we
2481             * leave ASAP again.
2482             */
2483            kvm_cpu_kick_self();
2484        }
2485
2486        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
2487         * Matching barrier in kvm_eat_signals.
2488         */
2489        smp_rmb();
2490
2491        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
2492
2493        attrs = kvm_arch_post_run(cpu, run);
2494
2495#ifdef KVM_HAVE_MCE_INJECTION
2496        if (unlikely(have_sigbus_pending)) {
2497            qemu_mutex_lock_iothread();
2498            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
2499                                    pending_sigbus_addr);
2500            have_sigbus_pending = false;
2501            qemu_mutex_unlock_iothread();
2502        }
2503#endif
2504
2505        if (run_ret < 0) {
2506            if (run_ret == -EINTR || run_ret == -EAGAIN) {
2507                DPRINTF("io window exit\n");
2508                kvm_eat_signals(cpu);
2509                ret = EXCP_INTERRUPT;
2510                break;
2511            }
2512            fprintf(stderr, "error: kvm run failed %s\n",
2513                    strerror(-run_ret));
2514#ifdef TARGET_PPC
2515            if (run_ret == -EBUSY) {
2516                fprintf(stderr,
2517                        "This is probably because your SMT is enabled.\n"
2518                        "VCPU can only run on primary threads with all "
2519                        "secondary threads offline.\n");
2520            }
2521#endif
2522            ret = -1;
2523            break;
2524        }
2525
2526        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
2527        switch (run->exit_reason) {
2528        case KVM_EXIT_IO:
2529            DPRINTF("handle_io\n");
2530            /* Called outside BQL */
2531            kvm_handle_io(run->io.port, attrs,
2532                          (uint8_t *)run + run->io.data_offset,
2533                          run->io.direction,
2534                          run->io.size,
2535                          run->io.count);
2536            ret = 0;
2537            break;
2538        case KVM_EXIT_MMIO:
2539            DPRINTF("handle_mmio\n");
2540            /* Called outside BQL */
2541            address_space_rw(&address_space_memory,
2542                             run->mmio.phys_addr, attrs,
2543                             run->mmio.data,
2544                             run->mmio.len,
2545                             run->mmio.is_write);
2546            ret = 0;
2547            break;
2548        case KVM_EXIT_IRQ_WINDOW_OPEN:
2549            DPRINTF("irq_window_open\n");
2550            ret = EXCP_INTERRUPT;
2551            break;
2552        case KVM_EXIT_SHUTDOWN:
2553            DPRINTF("shutdown\n");
2554            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2555            ret = EXCP_INTERRUPT;
2556            break;
2557        case KVM_EXIT_UNKNOWN:
2558            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
2559                    (uint64_t)run->hw.hardware_exit_reason);
2560            ret = -1;
2561            break;
2562        case KVM_EXIT_INTERNAL_ERROR:
2563            ret = kvm_handle_internal_error(cpu, run);
2564            break;
2565        case KVM_EXIT_SYSTEM_EVENT:
2566            switch (run->system_event.type) {
2567            case KVM_SYSTEM_EVENT_SHUTDOWN:
2568                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
2569                ret = EXCP_INTERRUPT;
2570                break;
2571            case KVM_SYSTEM_EVENT_RESET:
2572                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2573                ret = EXCP_INTERRUPT;
2574                break;
2575            case KVM_SYSTEM_EVENT_CRASH:
2576                kvm_cpu_synchronize_state(cpu);
2577                qemu_mutex_lock_iothread();
2578                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
2579                qemu_mutex_unlock_iothread();
2580                ret = 0;
2581                break;
2582            default:
2583                DPRINTF("kvm_arch_handle_exit\n");
2584                ret = kvm_arch_handle_exit(cpu, run);
2585                break;
2586            }
2587            break;
2588        default:
2589            DPRINTF("kvm_arch_handle_exit\n");
2590            ret = kvm_arch_handle_exit(cpu, run);
2591            break;
2592        }
2593    } while (ret == 0);
2594
2595    cpu_exec_end(cpu);
2596    qemu_mutex_lock_iothread();
2597
2598    if (ret < 0) {
2599        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2600        vm_stop(RUN_STATE_INTERNAL_ERROR);
2601    }
2602
2603    qatomic_set(&cpu->exit_request, 0);
2604    return ret;
2605}
2606
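/*
 * Thin wrappers around ioctl() on the /dev/kvm, VM, vcpu and device file
 * descriptors; they return the ioctl result, converted to -errno on failure.
 */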
2607int kvm_ioctl(KVMState *s, int type, ...)
2608{
2609    int ret;
2610    void *arg;
2611    va_list ap;
2612
2613    va_start(ap, type);
2614    arg = va_arg(ap, void *);
2615    va_end(ap);
2616
2617    trace_kvm_ioctl(type, arg);
2618    ret = ioctl(s->fd, type, arg);
2619    if (ret == -1) {
2620        ret = -errno;
2621    }
2622    return ret;
2623}
2624
2625int kvm_vm_ioctl(KVMState *s, int type, ...)
2626{
2627    int ret;
2628    void *arg;
2629    va_list ap;
2630
2631    va_start(ap, type);
2632    arg = va_arg(ap, void *);
2633    va_end(ap);
2634
2635    trace_kvm_vm_ioctl(type, arg);
2636    ret = ioctl(s->vmfd, type, arg);
2637    if (ret == -1) {
2638        ret = -errno;
2639    }
2640    return ret;
2641}
2642
2643int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
2644{
2645    int ret;
2646    void *arg;
2647    va_list ap;
2648
2649    va_start(ap, type);
2650    arg = va_arg(ap, void *);
2651    va_end(ap);
2652
2653    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
2654    ret = ioctl(cpu->kvm_fd, type, arg);
2655    if (ret == -1) {
2656        ret = -errno;
2657    }
2658    return ret;
2659}
2660
2661int kvm_device_ioctl(int fd, int type, ...)
2662{
2663    int ret;
2664    void *arg;
2665    va_list ap;
2666
2667    va_start(ap, type);
2668    arg = va_arg(ap, void *);
2669    va_end(ap);
2670
2671    trace_kvm_device_ioctl(fd, type, arg);
2672    ret = ioctl(fd, type, arg);
2673    if (ret == -1) {
2674        ret = -errno;
2675    }
2676    return ret;
2677}
2678
2679int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
2680{
2681    int ret;
2682    struct kvm_device_attr attribute = {
2683        .group = group,
2684        .attr = attr,
2685    };
2686
2687    if (!kvm_vm_attributes_allowed) {
2688        return 0;
2689    }
2690
2691    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
2692    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
2693    return ret ? 0 : 1;
2694}
2695
2696int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
2697{
2698    struct kvm_device_attr attribute = {
2699        .group = group,
2700        .attr = attr,
2701        .flags = 0,
2702    };
2703
2704    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
2705}
2706
2707int kvm_device_access(int fd, int group, uint64_t attr,
2708                      void *val, bool write, Error **errp)
2709{
2710    struct kvm_device_attr kvmattr;
2711    int err;
2712
2713    kvmattr.flags = 0;
2714    kvmattr.group = group;
2715    kvmattr.attr = attr;
2716    kvmattr.addr = (uintptr_t)val;
2717
2718    err = kvm_device_ioctl(fd,
2719                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
2720                           &kvmattr);
2721    if (err < 0) {
2722        error_setg_errno(errp, -err,
2723                         "KVM_%s_DEVICE_ATTR failed: Group %d "
2724                         "attr 0x%016" PRIx64,
2725                         write ? "SET" : "GET", group, attr);
2726    }
2727    return err;
2728}
2729
2730bool kvm_has_sync_mmu(void)
2731{
2732    return kvm_state->sync_mmu;
2733}
2734
2735int kvm_has_vcpu_events(void)
2736{
2737    return kvm_state->vcpu_events;
2738}
2739
2740int kvm_has_robust_singlestep(void)
2741{
2742    return kvm_state->robust_singlestep;
2743}
2744
2745int kvm_has_debugregs(void)
2746{
2747    return kvm_state->debugregs;
2748}
2749
2750int kvm_max_nested_state_length(void)
2751{
2752    return kvm_state->max_nested_state_len;
2753}
2754
2755int kvm_has_many_ioeventfds(void)
2756{
2757    if (!kvm_enabled()) {
2758        return 0;
2759    }
2760    return kvm_state->many_ioeventfds;
2761}
2762
2763int kvm_has_gsi_routing(void)
2764{
2765#ifdef KVM_CAP_IRQ_ROUTING
2766    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
2767#else
2768    return false;
2769#endif
2770}
2771
2772int kvm_has_intx_set_mask(void)
2773{
2774    return kvm_state->intx_set_mask;
2775}
2776
2777bool kvm_arm_supports_user_irq(void)
2778{
2779    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
2780}
2781
2782#ifdef KVM_CAP_SET_GUEST_DEBUG
2783struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
2784                                                 target_ulong pc)
2785{
2786    struct kvm_sw_breakpoint *bp;
2787
2788    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
2789        if (bp->pc == pc) {
2790            return bp;
2791        }
2792    }
2793    return NULL;
2794}
2795
2796int kvm_sw_breakpoints_active(CPUState *cpu)
2797{
2798    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
2799}
2800
2801struct kvm_set_guest_debug_data {
2802    struct kvm_guest_debug dbg;
2803    int err;
2804};
2805
2806static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
2807{
2808    struct kvm_set_guest_debug_data *dbg_data =
2809        (struct kvm_set_guest_debug_data *) data.host_ptr;
2810
2811    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
2812                                   &dbg_data->dbg);
2813}
2814
2815int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2816{
2817    struct kvm_set_guest_debug_data data;
2818
2819    data.dbg.control = reinject_trap;
2820
2821    if (cpu->singlestep_enabled) {
2822        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
2823    }
2824    kvm_arch_update_guest_debug(cpu, &data.dbg);
2825
2826    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
2827               RUN_ON_CPU_HOST_PTR(&data));
2828    return data.err;
2829}
2830
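/*
 * Software breakpoints are reference counted: inserting one at an existing
 * address only bumps use_count, and it is removed once the count drops back
 * to zero.  Every change is followed by refreshing the guest debug state of
 * all vcpus.
 */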
2831int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2832                          target_ulong len, int type)
2833{
2834    struct kvm_sw_breakpoint *bp;
2835    int err;
2836
2837    if (type == GDB_BREAKPOINT_SW) {
2838        bp = kvm_find_sw_breakpoint(cpu, addr);
2839        if (bp) {
2840            bp->use_count++;
2841            return 0;
2842        }
2843
2844        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
2845        bp->pc = addr;
2846        bp->use_count = 1;
2847        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
2848        if (err) {
2849            g_free(bp);
2850            return err;
2851        }
2852
2853        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2854    } else {
2855        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
2856        if (err) {
2857            return err;
2858        }
2859    }
2860
2861    CPU_FOREACH(cpu) {
2862        err = kvm_update_guest_debug(cpu, 0);
2863        if (err) {
2864            return err;
2865        }
2866    }
2867    return 0;
2868}
2869
2870int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2871                          target_ulong len, int type)
2872{
2873    struct kvm_sw_breakpoint *bp;
2874    int err;
2875
2876    if (type == GDB_BREAKPOINT_SW) {
2877        bp = kvm_find_sw_breakpoint(cpu, addr);
2878        if (!bp) {
2879            return -ENOENT;
2880        }
2881
2882        if (bp->use_count > 1) {
2883            bp->use_count--;
2884            return 0;
2885        }
2886
2887        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
2888        if (err) {
2889            return err;
2890        }
2891
2892        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2893        g_free(bp);
2894    } else {
2895        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
2896        if (err) {
2897            return err;
2898        }
2899    }
2900
2901    CPU_FOREACH(cpu) {
2902        err = kvm_update_guest_debug(cpu, 0);
2903        if (err) {
2904            return err;
2905        }
2906    }
2907    return 0;
2908}
2909
2910void kvm_remove_all_breakpoints(CPUState *cpu)
2911{
2912    struct kvm_sw_breakpoint *bp, *next;
2913    KVMState *s = cpu->kvm_state;
2914    CPUState *tmpcpu;
2915
2916    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
2917        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
2918            /* Try harder to find a CPU that currently sees the breakpoint. */
2919            CPU_FOREACH(tmpcpu) {
2920                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
2921                    break;
2922                }
2923            }
2924        }
2925        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
2926        g_free(bp);
2927    }
2928    kvm_arch_remove_all_hw_breakpoints();
2929
2930    CPU_FOREACH(cpu) {
2931        kvm_update_guest_debug(cpu, 0);
2932    }
2933}
2934
2935#else /* !KVM_CAP_SET_GUEST_DEBUG */
2936
2937int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2938{
2939    return -EINVAL;
2940}
2941
2942int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2943                          target_ulong len, int type)
2944{
2945    return -EINVAL;
2946}
2947
2948int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2949                          target_ulong len, int type)
2950{
2951    return -EINVAL;
2952}
2953
2954void kvm_remove_all_breakpoints(CPUState *cpu)
2955{
2956}
2957#endif /* !KVM_CAP_SET_GUEST_DEBUG */
2958
2959static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
2960{
2961    KVMState *s = kvm_state;
2962    struct kvm_signal_mask *sigmask;
2963    int r;
2964
2965    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
2966
2967    sigmask->len = s->sigmask_len;
2968    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
2969    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
2970    g_free(sigmask);
2971
2972    return r;
2973}
2974
2975static void kvm_ipi_signal(int sig)
2976{
2977    if (current_cpu) {
2978        assert(kvm_immediate_exit);
2979        kvm_cpu_kick(current_cpu);
2980    }
2981}
2982
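/*
 * Install the SIG_IPI handler and configure the vcpu thread's signal mask.
 * With KVM_CAP_IMMEDIATE_EXIT the thread runs with SIG_IPI unblocked;
 * otherwise KVM_SET_SIGNAL_MASK unblocks SIG_IPI only while the thread is
 * inside KVM_RUN.
 */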
2983void kvm_init_cpu_signals(CPUState *cpu)
2984{
2985    int r;
2986    sigset_t set;
2987    struct sigaction sigact;
2988
2989    memset(&sigact, 0, sizeof(sigact));
2990    sigact.sa_handler = kvm_ipi_signal;
2991    sigaction(SIG_IPI, &sigact, NULL);
2992
2993    pthread_sigmask(SIG_BLOCK, NULL, &set);
2994#if defined KVM_HAVE_MCE_INJECTION
2995    sigdelset(&set, SIGBUS);
2996    pthread_sigmask(SIG_SETMASK, &set, NULL);
2997#endif
2998    sigdelset(&set, SIG_IPI);
2999    if (kvm_immediate_exit) {
3000        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
3001    } else {
3002        r = kvm_set_signal_mask(cpu, &set);
3003    }
3004    if (r) {
3005        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
3006        exit(1);
3007    }
3008}
3009
3010/* Called asynchronously in VCPU thread.  */
3011int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
3012{
3013#ifdef KVM_HAVE_MCE_INJECTION
3014    if (have_sigbus_pending) {
3015        return 1;
3016    }
3017    have_sigbus_pending = true;
3018    pending_sigbus_addr = addr;
3019    pending_sigbus_code = code;
3020    qatomic_set(&cpu->exit_request, 1);
3021    return 0;
3022#else
3023    return 1;
3024#endif
3025}
3026
3027/* Called synchronously (via signalfd) in main thread.  */
3028int kvm_on_sigbus(int code, void *addr)
3029{
3030#ifdef KVM_HAVE_MCE_INJECTION
3031    /* An Action Required MCE kills the process if SIGBUS is blocked.  Since
3032     * SIGBUS is blocked in the I/O thread, where we handle MCE via signalfd,
3033     * only Action Optional MCEs can reach this point.
3034     */
3035    assert(code != BUS_MCEERR_AR);
3036    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
3037    return 0;
3038#else
3039    return 1;
3040#endif
3041}
3042
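/*
 * Create an in-kernel device with KVM_CREATE_DEVICE and return its fd.  With
 * test=true only KVM_CREATE_DEVICE_TEST is requested, so a return value of 0
 * means the device type is supported without actually instantiating it.
 */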
3043int kvm_create_device(KVMState *s, uint64_t type, bool test)
3044{
3045    int ret;
3046    struct kvm_create_device create_dev;
3047
3048    create_dev.type = type;
3049    create_dev.fd = -1;
3050    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
3051
3052    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
3053        return -ENOTSUP;
3054    }
3055
3056    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
3057    if (ret) {
3058        return ret;
3059    }
3060
3061    return test ? 0 : create_dev.fd;
3062}
3063
3064bool kvm_device_supported(int vmfd, uint64_t type)
3065{
3066    struct kvm_create_device create_dev = {
3067        .type = type,
3068        .fd = -1,
3069        .flags = KVM_CREATE_DEVICE_TEST,
3070    };
3071
3072    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
3073        return false;
3074    }
3075
3076    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
3077}
3078
3079int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
3080{
3081    struct kvm_one_reg reg;
3082    int r;
3083
3084    reg.id = id;
3085    reg.addr = (uintptr_t) source;
3086    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
3087    if (r) {
3088        trace_kvm_failed_reg_set(id, strerror(-r));
3089    }
3090    return r;
3091}
3092
3093int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
3094{
3095    struct kvm_one_reg reg;
3096    int r;
3097
3098    reg.id = id;
3099    reg.addr = (uintptr_t) target;
3100    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
3101    if (r) {
3102        trace_kvm_failed_reg_get(id, strerror(-r));
3103    }
3104    return r;
3105}
3106
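/*
 * AccelClass::has_memory hook: report whether a KVM memslot already backs
 * the start of the given range in this address space.
 */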
3107static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
3108                                 hwaddr start_addr, hwaddr size)
3109{
3110    KVMState *kvm = KVM_STATE(ms->accelerator);
3111    int i;
3112
3113    for (i = 0; i < kvm->nr_as; ++i) {
3114        if (kvm->as[i].as == as && kvm->as[i].ml) {
3115            size = MIN(kvm_max_slot_size, size);
3116            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
3117                                                    start_addr, size);
3118        }
3119    }
3120
3121    return false;
3122}
3123
3124static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
3125                                   const char *name, void *opaque,
3126                                   Error **errp)
3127{
3128    KVMState *s = KVM_STATE(obj);
3129    int64_t value = s->kvm_shadow_mem;
3130
3131    visit_type_int(v, name, &value, errp);
3132}
3133
3134static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
3135                                   const char *name, void *opaque,
3136                                   Error **errp)
3137{
3138    KVMState *s = KVM_STATE(obj);
3139    int64_t value;
3140
3141    if (!visit_type_int(v, name, &value, errp)) {
3142        return;
3143    }
3144
3145    s->kvm_shadow_mem = value;
3146}
3147
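/*
 * Setter for the "kernel-irqchip" accelerator property: "on" and "split"
 * both allow and require an in-kernel irqchip and differ only in whether
 * split mode is requested; "off" disables it entirely.
 */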
3148static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
3149                                   const char *name, void *opaque,
3150                                   Error **errp)
3151{
3152    KVMState *s = KVM_STATE(obj);
3153    OnOffSplit mode;
3154
3155    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
3156        return;
3157    }
3158    switch (mode) {
3159    case ON_OFF_SPLIT_ON:
3160        s->kernel_irqchip_allowed = true;
3161        s->kernel_irqchip_required = true;
3162        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3163        break;
3164    case ON_OFF_SPLIT_OFF:
3165        s->kernel_irqchip_allowed = false;
3166        s->kernel_irqchip_required = false;
3167        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3168        break;
3169    case ON_OFF_SPLIT_SPLIT:
3170        s->kernel_irqchip_allowed = true;
3171        s->kernel_irqchip_required = true;
3172        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
3173        break;
3174    default:
3175        /* The value was checked in visit_type_OnOffSplit() above. If
3176         * we get here, then something is wrong in QEMU.
3177         */
3178        abort();
3179    }
3180}
3181
3182bool kvm_kernel_irqchip_allowed(void)
3183{
3184    return kvm_state->kernel_irqchip_allowed;
3185}
3186
3187bool kvm_kernel_irqchip_required(void)
3188{
3189    return kvm_state->kernel_irqchip_required;
3190}
3191
3192bool kvm_kernel_irqchip_split(void)
3193{
3194    return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
3195}
3196
3197static void kvm_accel_instance_init(Object *obj)
3198{
3199    KVMState *s = KVM_STATE(obj);
3200
3201    s->kvm_shadow_mem = -1;
3202    s->kernel_irqchip_allowed = true;
3203    s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
3204}
3205
3206static void kvm_accel_class_init(ObjectClass *oc, void *data)
3207{
3208    AccelClass *ac = ACCEL_CLASS(oc);
3209    ac->name = "KVM";
3210    ac->init_machine = kvm_init;
3211    ac->has_memory = kvm_accel_has_memory;
3212    ac->allowed = &kvm_allowed;
3213
3214    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
3215        NULL, kvm_set_kernel_irqchip,
3216        NULL, NULL);
3217    object_class_property_set_description(oc, "kernel-irqchip",
3218        "Configure KVM in-kernel irqchip");
3219
3220    object_class_property_add(oc, "kvm-shadow-mem", "int",
3221        kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
3222        NULL, NULL);
3223    object_class_property_set_description(oc, "kvm-shadow-mem",
3224        "KVM shadow MMU size");
3225}
3226
3227static const TypeInfo kvm_accel_type = {
3228    .name = TYPE_KVM_ACCEL,
3229    .parent = TYPE_ACCEL,
3230    .instance_init = kvm_accel_instance_init,
3231    .class_init = kvm_accel_class_init,
3232    .instance_size = sizeof(KVMState),
3233};
3234
3235static void kvm_type_init(void)
3236{
3237    type_register_static(&kvm_accel_type);
3238}
3239
3240type_init(kvm_type_init);
3241