qemu/accel/kvm/kvm-all.c
   1/*
   2 * QEMU KVM support
   3 *
   4 * Copyright IBM, Corp. 2008
   5 *           Red Hat, Inc. 2008
   6 *
   7 * Authors:
   8 *  Anthony Liguori   <aliguori@us.ibm.com>
   9 *  Glauber Costa     <gcosta@redhat.com>
  10 *
  11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  12 * See the COPYING file in the top-level directory.
  13 *
  14 */
  15
  16#include "qemu/osdep.h"
  17#include <sys/ioctl.h>
  18
  19#include <linux/kvm.h>
  20
  21#include "qemu-common.h"
  22#include "qemu/atomic.h"
  23#include "qemu/option.h"
  24#include "qemu/config-file.h"
  25#include "qemu/error-report.h"
  26#include "qapi/error.h"
  27#include "hw/hw.h"
  28#include "hw/pci/msi.h"
  29#include "hw/pci/msix.h"
  30#include "hw/s390x/adapter.h"
  31#include "exec/gdbstub.h"
  32#include "sysemu/kvm_int.h"
  33#include "sysemu/cpus.h"
  34#include "qemu/bswap.h"
  35#include "exec/memory.h"
  36#include "exec/ram_addr.h"
  37#include "exec/address-spaces.h"
  38#include "qemu/event_notifier.h"
  39#include "trace.h"
  40#include "hw/irq.h"
  41#include "sysemu/sev.h"
  42#include "sysemu/balloon.h"
  43
  44#include "hw/boards.h"
  45
  46/* This check must be after config-host.h is included */
  47#ifdef CONFIG_EVENTFD
  48#include <sys/eventfd.h>
  49#endif
  50
  51/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
  52 * need to use the real host PAGE_SIZE, as that's what KVM will use.
  53 */
  54#define PAGE_SIZE getpagesize()
  55
  56//#define DEBUG_KVM
  57
  58#ifdef DEBUG_KVM
  59#define DPRINTF(fmt, ...) \
  60    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
  61#else
  62#define DPRINTF(fmt, ...) \
  63    do { } while (0)
  64#endif
  65
  66#define KVM_MSI_HASHTAB_SIZE    256
  67
  68struct KVMParkedVcpu {
  69    unsigned long vcpu_id;
  70    int kvm_fd;
  71    QLIST_ENTRY(KVMParkedVcpu) node;
  72};
  73
  74struct KVMState
  75{
  76    AccelState parent_obj;
  77
  78    int nr_slots;
  79    int fd;
  80    int vmfd;
  81    int coalesced_mmio;
  82    int coalesced_pio;
  83    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
  84    bool coalesced_flush_in_progress;
  85    int vcpu_events;
  86    int robust_singlestep;
  87    int debugregs;
  88#ifdef KVM_CAP_SET_GUEST_DEBUG
  89    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
  90#endif
  91    int many_ioeventfds;
  92    int intx_set_mask;
  93    bool sync_mmu;
   94    /* The man page (and POSIX) say ioctl numbers are signed int, but
   95     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
   96     * unsigned, and treating them as signed here can break things. */
  97    unsigned irq_set_ioctl;
  98    unsigned int sigmask_len;
  99    GHashTable *gsimap;
 100#ifdef KVM_CAP_IRQ_ROUTING
 101    struct kvm_irq_routing *irq_routes;
 102    int nr_allocated_irq_routes;
 103    unsigned long *used_gsi_bitmap;
 104    unsigned int gsi_count;
 105    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
 106#endif
 107    KVMMemoryListener memory_listener;
 108    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
 109
 110    /* memory encryption */
 111    void *memcrypt_handle;
 112    int (*memcrypt_encrypt_data)(void *handle, uint8_t *ptr, uint64_t len);
 113};
 114
 115KVMState *kvm_state;
 116bool kvm_kernel_irqchip;
 117bool kvm_split_irqchip;
 118bool kvm_async_interrupts_allowed;
 119bool kvm_halt_in_kernel_allowed;
 120bool kvm_eventfds_allowed;
 121bool kvm_irqfds_allowed;
 122bool kvm_resamplefds_allowed;
 123bool kvm_msi_via_irqfd_allowed;
 124bool kvm_gsi_routing_allowed;
 125bool kvm_gsi_direct_mapping;
 126bool kvm_allowed;
 127bool kvm_readonly_mem_allowed;
 128bool kvm_vm_attributes_allowed;
 129bool kvm_direct_msi_allowed;
 130bool kvm_ioeventfd_any_length_allowed;
 131bool kvm_msi_use_devid;
 132static bool kvm_immediate_exit;
 133
 134static const KVMCapabilityInfo kvm_required_capabilites[] = {
 135    KVM_CAP_INFO(USER_MEMORY),
 136    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
 137    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
 138    KVM_CAP_LAST_INFO
 139};
 140
 141int kvm_get_max_memslots(void)
 142{
 143    KVMState *s = KVM_STATE(current_machine->accelerator);
 144
 145    return s->nr_slots;
 146}
 147
 148bool kvm_memcrypt_enabled(void)
 149{
 150    if (kvm_state && kvm_state->memcrypt_handle) {
 151        return true;
 152    }
 153
 154    return false;
 155}
 156
 157int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
 158{
 159    if (kvm_state->memcrypt_handle &&
 160        kvm_state->memcrypt_encrypt_data) {
 161        return kvm_state->memcrypt_encrypt_data(kvm_state->memcrypt_handle,
 162                                              ptr, len);
 163    }
 164
 165    return 1;
 166}
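
/* Note: the helper above returns 1 when no memory encryption context has
 * been set up, and otherwise returns whatever the encrypt callback returns
 * (0 on success); any non-zero value therefore indicates failure.
 */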
 167
 168static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 169{
 170    KVMState *s = kvm_state;
 171    int i;
 172
 173    for (i = 0; i < s->nr_slots; i++) {
 174        if (kml->slots[i].memory_size == 0) {
 175            return &kml->slots[i];
 176        }
 177    }
 178
 179    return NULL;
 180}
 181
 182bool kvm_has_free_slot(MachineState *ms)
 183{
 184    KVMState *s = KVM_STATE(ms->accelerator);
 185
 186    return kvm_get_free_slot(&s->memory_listener);
 187}
 188
 189static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
 190{
 191    KVMSlot *slot = kvm_get_free_slot(kml);
 192
 193    if (slot) {
 194        return slot;
 195    }
 196
 197    fprintf(stderr, "%s: no free slot available\n", __func__);
 198    abort();
 199}
 200
 201static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
 202                                         hwaddr start_addr,
 203                                         hwaddr size)
 204{
 205    KVMState *s = kvm_state;
 206    int i;
 207
 208    for (i = 0; i < s->nr_slots; i++) {
 209        KVMSlot *mem = &kml->slots[i];
 210
 211        if (start_addr == mem->start_addr && size == mem->memory_size) {
 212            return mem;
 213        }
 214    }
 215
 216    return NULL;
 217}
 218
 219/*
 220 * Calculate and align the start address and the size of the section.
 221 * Return the size. If the size is 0, the aligned section is empty.
 222 */
 223static hwaddr kvm_align_section(MemoryRegionSection *section,
 224                                hwaddr *start)
 225{
 226    hwaddr size = int128_get64(section->size);
 227    hwaddr delta, aligned;
 228
  229    /* kvm works in page size chunks, but the function may be called
  230       with a sub-page size and an unaligned start address. Round the start
  231       address up to the next page boundary and truncate the size to the previous one. */
 232    aligned = ROUND_UP(section->offset_within_address_space,
 233                       qemu_real_host_page_size);
 234    delta = aligned - section->offset_within_address_space;
 235    *start = aligned;
 236    if (delta > size) {
 237        return 0;
 238    }
 239
 240    return (size - delta) & qemu_real_host_page_mask;
 241}
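
/* A worked example of the rounding above (illustrative numbers, assuming a
 * 4 KiB host page size): a section at offset_within_address_space 0x1800
 * with size 0x3000 gives aligned = 0x2000 and delta = 0x800, so *start is
 * set to 0x2000 and the returned size is (0x3000 - 0x800) & ~0xfff = 0x2000,
 * i.e. only the fully covered pages [0x2000, 0x4000) are handed to KVM.
 */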
 242
 243int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
 244                                       hwaddr *phys_addr)
 245{
 246    KVMMemoryListener *kml = &s->memory_listener;
 247    int i;
 248
 249    for (i = 0; i < s->nr_slots; i++) {
 250        KVMSlot *mem = &kml->slots[i];
 251
 252        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
 253            *phys_addr = mem->start_addr + (ram - mem->ram);
 254            return 1;
 255        }
 256    }
 257
 258    return 0;
 259}
 260
 261static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
 262{
 263    KVMState *s = kvm_state;
 264    struct kvm_userspace_memory_region mem;
 265    int ret;
 266
 267    mem.slot = slot->slot | (kml->as_id << 16);
 268    mem.guest_phys_addr = slot->start_addr;
 269    mem.userspace_addr = (unsigned long)slot->ram;
 270    mem.flags = slot->flags;
 271
 272    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
  273        /* The KVM_MEM_READONLY flag cannot be toggled on an existing slot,
  274         * so delete the slot (size 0) first and re-create it (KVM commit 75d61fbc). */
 275        mem.memory_size = 0;
 276        kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 277    }
 278    mem.memory_size = slot->memory_size;
 279    ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 280    slot->old_flags = mem.flags;
 281    trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
 282                              mem.memory_size, mem.userspace_addr, ret);
 283    return ret;
 284}
 285
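/* KVM provides no ioctl to destroy a vCPU, so "destroying" one below only
 * unmaps its kvm_run area and parks the vcpu fd on kvm_parked_vcpus;
 * kvm_get_vcpu() hands the fd back out if a vCPU with the same id is
 * created again (e.g. on CPU hot-plug after an unplug).
 */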
 286int kvm_destroy_vcpu(CPUState *cpu)
 287{
 288    KVMState *s = kvm_state;
 289    long mmap_size;
 290    struct KVMParkedVcpu *vcpu = NULL;
 291    int ret = 0;
 292
 293    DPRINTF("kvm_destroy_vcpu\n");
 294
 295    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 296    if (mmap_size < 0) {
 297        ret = mmap_size;
 298        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
 299        goto err;
 300    }
 301
 302    ret = munmap(cpu->kvm_run, mmap_size);
 303    if (ret < 0) {
 304        goto err;
 305    }
 306
 307    vcpu = g_malloc0(sizeof(*vcpu));
 308    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
 309    vcpu->kvm_fd = cpu->kvm_fd;
 310    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
 311err:
 312    return ret;
 313}
 314
 315static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
 316{
 317    struct KVMParkedVcpu *cpu;
 318
 319    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
 320        if (cpu->vcpu_id == vcpu_id) {
 321            int kvm_fd;
 322
 323            QLIST_REMOVE(cpu, node);
 324            kvm_fd = cpu->kvm_fd;
 325            g_free(cpu);
 326            return kvm_fd;
 327        }
 328    }
 329
 330    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
 331}
 332
 333int kvm_init_vcpu(CPUState *cpu)
 334{
 335    KVMState *s = kvm_state;
 336    long mmap_size;
 337    int ret;
 338
 339    DPRINTF("kvm_init_vcpu\n");
 340
 341    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
 342    if (ret < 0) {
 343        DPRINTF("kvm_create_vcpu failed\n");
 344        goto err;
 345    }
 346
 347    cpu->kvm_fd = ret;
 348    cpu->kvm_state = s;
 349    cpu->vcpu_dirty = true;
 350
 351    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 352    if (mmap_size < 0) {
 353        ret = mmap_size;
 354        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
 355        goto err;
 356    }
 357
 358    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 359                        cpu->kvm_fd, 0);
 360    if (cpu->kvm_run == MAP_FAILED) {
 361        ret = -errno;
 362        DPRINTF("mmap'ing vcpu state failed\n");
 363        goto err;
 364    }
 365
 366    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
 367        s->coalesced_mmio_ring =
 368            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
 369    }
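    /* s->coalesced_mmio holds the result of checking KVM_CAP_COALESCED_MMIO,
     * which is the page offset of the coalesced MMIO ring within the vcpu
     * mmap area, hence the multiplication by the host PAGE_SIZE above. */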
 370
 371    ret = kvm_arch_init_vcpu(cpu);
 372err:
 373    return ret;
 374}
 375
 376/*
 377 * dirty pages logging control
 378 */
 379
 380static int kvm_mem_flags(MemoryRegion *mr)
 381{
 382    bool readonly = mr->readonly || memory_region_is_romd(mr);
 383    int flags = 0;
 384
 385    if (memory_region_get_dirty_log_mask(mr) != 0) {
 386        flags |= KVM_MEM_LOG_DIRTY_PAGES;
 387    }
 388    if (readonly && kvm_readonly_mem_allowed) {
 389        flags |= KVM_MEM_READONLY;
 390    }
 391    return flags;
 392}
 393
 394static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
 395                                 MemoryRegion *mr)
 396{
 397    mem->flags = kvm_mem_flags(mr);
 398
  399    /* If nothing effectively changed, there is no need to issue the ioctl */
 400    if (mem->flags == mem->old_flags) {
 401        return 0;
 402    }
 403
 404    return kvm_set_user_memory_region(kml, mem, false);
 405}
 406
 407static int kvm_section_update_flags(KVMMemoryListener *kml,
 408                                    MemoryRegionSection *section)
 409{
 410    hwaddr start_addr, size;
 411    KVMSlot *mem;
 412
 413    size = kvm_align_section(section, &start_addr);
 414    if (!size) {
 415        return 0;
 416    }
 417
 418    mem = kvm_lookup_matching_slot(kml, start_addr, size);
 419    if (!mem) {
 420        /* We don't have a slot if we want to trap every access. */
 421        return 0;
 422    }
 423
 424    return kvm_slot_update_flags(kml, mem, section->mr);
 425}
 426
 427static void kvm_log_start(MemoryListener *listener,
 428                          MemoryRegionSection *section,
 429                          int old, int new)
 430{
 431    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 432    int r;
 433
 434    if (old != 0) {
 435        return;
 436    }
 437
 438    r = kvm_section_update_flags(kml, section);
 439    if (r < 0) {
 440        abort();
 441    }
 442}
 443
 444static void kvm_log_stop(MemoryListener *listener,
 445                          MemoryRegionSection *section,
 446                          int old, int new)
 447{
 448    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 449    int r;
 450
 451    if (new != 0) {
 452        return;
 453    }
 454
 455    r = kvm_section_update_flags(kml, section);
 456    if (r < 0) {
 457        abort();
 458    }
 459}
 460
 461/* get kvm's dirty pages bitmap and update qemu's */
 462static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
 463                                         unsigned long *bitmap)
 464{
 465    ram_addr_t start = section->offset_within_region +
 466                       memory_region_get_ram_addr(section->mr);
 467    ram_addr_t pages = int128_get64(section->size) / getpagesize();
 468
 469    cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
 470    return 0;
 471}
 472
 473#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
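/* ALIGN() rounds x up to the next multiple of y; it is only valid when y is
 * a power of two. */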
 474
  475/**
  476 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
  477 * This function updates qemu's dirty bitmap using
  478 * cpu_physical_memory_set_dirty_lebitmap(), i.e. the pages that KVM
  479 * reports as dirty are marked dirty in qemu's bitmap as well.
  480 *
  481 * @kml: the KVM memory listener
  482 * @section: the memory section to synchronize the dirty bitmap for
  483 */
 484static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
 485                                          MemoryRegionSection *section)
 486{
 487    KVMState *s = kvm_state;
 488    struct kvm_dirty_log d = {};
 489    KVMSlot *mem;
 490    hwaddr start_addr, size;
 491
 492    size = kvm_align_section(section, &start_addr);
 493    if (size) {
 494        mem = kvm_lookup_matching_slot(kml, start_addr, size);
 495        if (!mem) {
 496            /* We don't have a slot if we want to trap every access. */
 497            return 0;
 498        }
 499
  500        /* XXX bad kernel interface alert
  501         * For the dirty bitmap, the kernel allocates an array whose size is
  502         * rounded up to bits-per-long.  But when the kernel is 64-bit and
  503         * userspace is 32-bit, userspace cannot round up to the same
  504         * bits-per-long, since sizeof(long) differs between kernel and
  505         * user space.  Userspace would then provide a buffer that may be
  506         * 4 bytes smaller than what the kernel uses, resulting in
  507         * userspace memory corruption (which is not detectable even by
  508         * valgrind in most cases).
  509         * So for now, align to 64 instead of HOST_LONG_BITS here, in the
  510         * hope that sizeof(long) won't become >8 any time soon.
  511         */
 512        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
 513                     /*HOST_LONG_BITS*/ 64) / 8;
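        /* For example, a 1 GiB slot with 4 KiB target pages needs
         * 2^30 >> 12 = 262144 bits, i.e. ALIGN(262144, 64) / 8 = 32768 bytes
         * of bitmap (one bit per page, padded to a multiple of 8 bytes). */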
 514        d.dirty_bitmap = g_malloc0(size);
 515
 516        d.slot = mem->slot | (kml->as_id << 16);
 517        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
 518            DPRINTF("ioctl failed %d\n", errno);
 519            g_free(d.dirty_bitmap);
 520            return -1;
 521        }
 522
 523        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
 524        g_free(d.dirty_bitmap);
 525    }
 526
 527    return 0;
 528}
 529
 530static void kvm_coalesce_mmio_region(MemoryListener *listener,
  531                                     MemoryRegionSection *section,
 532                                     hwaddr start, hwaddr size)
 533{
 534    KVMState *s = kvm_state;
 535
 536    if (s->coalesced_mmio) {
 537        struct kvm_coalesced_mmio_zone zone;
 538
 539        zone.addr = start;
 540        zone.size = size;
 541        zone.pad = 0;
 542
 543        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
 544    }
 545}
 546
 547static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
  548                                       MemoryRegionSection *section,
 549                                       hwaddr start, hwaddr size)
 550{
 551    KVMState *s = kvm_state;
 552
 553    if (s->coalesced_mmio) {
 554        struct kvm_coalesced_mmio_zone zone;
 555
 556        zone.addr = start;
 557        zone.size = size;
 558        zone.pad = 0;
 559
 560        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 561    }
 562}
 563
 564static void kvm_coalesce_pio_add(MemoryListener *listener,
 565                                MemoryRegionSection *section,
 566                                hwaddr start, hwaddr size)
 567{
 568    KVMState *s = kvm_state;
 569
 570    if (s->coalesced_pio) {
 571        struct kvm_coalesced_mmio_zone zone;
 572
 573        zone.addr = start;
 574        zone.size = size;
 575        zone.pio = 1;
 576
 577        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
 578    }
 579}
 580
 581static void kvm_coalesce_pio_del(MemoryListener *listener,
 582                                MemoryRegionSection *section,
 583                                hwaddr start, hwaddr size)
 584{
 585    KVMState *s = kvm_state;
 586
 587    if (s->coalesced_pio) {
 588        struct kvm_coalesced_mmio_zone zone;
 589
 590        zone.addr = start;
 591        zone.size = size;
 592        zone.pio = 1;
 593
 594        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
  595    }
 596}
 597
 598static MemoryListener kvm_coalesced_pio_listener = {
 599    .coalesced_io_add = kvm_coalesce_pio_add,
 600    .coalesced_io_del = kvm_coalesce_pio_del,
 601};
 602
 603int kvm_check_extension(KVMState *s, unsigned int extension)
 604{
 605    int ret;
 606
 607    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
 608    if (ret < 0) {
 609        ret = 0;
 610    }
 611
 612    return ret;
 613}
 614
 615int kvm_vm_check_extension(KVMState *s, unsigned int extension)
 616{
 617    int ret;
 618
 619    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
 620    if (ret < 0) {
 621        /* VM wide version not implemented, use global one instead */
 622        ret = kvm_check_extension(s, extension);
 623    }
 624
 625    return ret;
 626}
 627
 628static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
 629{
 630#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
  631    /* The kernel expects ioeventfd values in host (HOST_WORDS_BIGENDIAN)
  632     * endianness, but the memory core hands them to us in target endianness.
 633     * For example, PPC is always treated as big-endian even if running
 634     * on KVM and on PPC64LE.  Correct here.
 635     */
 636    switch (size) {
 637    case 2:
 638        val = bswap16(val);
 639        break;
 640    case 4:
 641        val = bswap32(val);
 642        break;
 643    }
 644#endif
 645    return val;
 646}
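
/* For example (illustrative only): a big-endian guest running on a
 * little-endian host that arms a 2-byte ioeventfd with datamatch 0x1234
 * gets the value swapped to 0x3412 here, so the kernel's native-endian
 * comparison matches the bytes the guest actually stores.
 */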
 647
 648static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
 649                                  bool assign, uint32_t size, bool datamatch)
 650{
 651    int ret;
 652    struct kvm_ioeventfd iofd = {
 653        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
 654        .addr = addr,
 655        .len = size,
 656        .flags = 0,
 657        .fd = fd,
 658    };
 659
 660    if (!kvm_enabled()) {
 661        return -ENOSYS;
 662    }
 663
 664    if (datamatch) {
 665        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
 666    }
 667    if (!assign) {
 668        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
 669    }
 670
 671    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
 672
 673    if (ret < 0) {
 674        return -errno;
 675    }
 676
 677    return 0;
 678}
 679
 680static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
 681                                 bool assign, uint32_t size, bool datamatch)
 682{
 683    struct kvm_ioeventfd kick = {
 684        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
 685        .addr = addr,
 686        .flags = KVM_IOEVENTFD_FLAG_PIO,
 687        .len = size,
 688        .fd = fd,
 689    };
 690    int r;
 691    if (!kvm_enabled()) {
 692        return -ENOSYS;
 693    }
 694    if (datamatch) {
 695        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
 696    }
 697    if (!assign) {
 698        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
 699    }
 700    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
 701    if (r < 0) {
 702        return r;
 703    }
 704    return 0;
 705}
 706
 707
 708static int kvm_check_many_ioeventfds(void)
 709{
 710    /* Userspace can use ioeventfd for io notification.  This requires a host
 711     * that supports eventfd(2) and an I/O thread; since eventfd does not
 712     * support SIGIO it cannot interrupt the vcpu.
 713     *
 714     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
 715     * can avoid creating too many ioeventfds.
 716     */
 717#if defined(CONFIG_EVENTFD)
 718    int ioeventfds[7];
 719    int i, ret = 0;
 720    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
 721        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
 722        if (ioeventfds[i] < 0) {
 723            break;
 724        }
 725        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
 726        if (ret < 0) {
 727            close(ioeventfds[i]);
 728            break;
 729        }
 730    }
 731
 732    /* Decide whether many devices are supported or not */
 733    ret = i == ARRAY_SIZE(ioeventfds);
 734
 735    while (i-- > 0) {
 736        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
 737        close(ioeventfds[i]);
 738    }
 739    return ret;
 740#else
 741    return 0;
 742#endif
 743}
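
/* The probe above deliberately registers ARRAY_SIZE(ioeventfds) == 7 PIO
 * ioeventfds: on old kernels with the 6-device io bus limit the seventh
 * registration fails, so "many ioeventfds" is reported only when all seven
 * succeed.
 */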
 744
 745static const KVMCapabilityInfo *
 746kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
 747{
 748    while (list->name) {
 749        if (!kvm_check_extension(s, list->value)) {
 750            return list;
 751        }
 752        list++;
 753    }
 754    return NULL;
 755}
 756
 757static void kvm_set_phys_mem(KVMMemoryListener *kml,
 758                             MemoryRegionSection *section, bool add)
 759{
 760    KVMSlot *mem;
 761    int err;
 762    MemoryRegion *mr = section->mr;
 763    bool writeable = !mr->readonly && !mr->rom_device;
 764    hwaddr start_addr, size;
 765    void *ram;
 766
 767    if (!memory_region_is_ram(mr)) {
 768        if (writeable || !kvm_readonly_mem_allowed) {
 769            return;
 770        } else if (!mr->romd_mode) {
 771            /* If the memory device is not in romd_mode, then we actually want
 772             * to remove the kvm memory slot so all accesses will trap. */
 773            add = false;
 774        }
 775    }
 776
 777    size = kvm_align_section(section, &start_addr);
 778    if (!size) {
 779        return;
 780    }
 781
 782    /* use aligned delta to align the ram address */
 783    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
 784          (start_addr - section->offset_within_address_space);
 785
 786    if (!add) {
 787        mem = kvm_lookup_matching_slot(kml, start_addr, size);
 788        if (!mem) {
 789            return;
 790        }
 791        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
 792            kvm_physical_sync_dirty_bitmap(kml, section);
 793        }
 794
 795        /* unregister the slot */
 796        mem->memory_size = 0;
 797        mem->flags = 0;
 798        err = kvm_set_user_memory_region(kml, mem, false);
 799        if (err) {
 800            fprintf(stderr, "%s: error unregistering slot: %s\n",
 801                    __func__, strerror(-err));
 802            abort();
 803        }
 804        return;
 805    }
 806
 807    /* register the new slot */
 808    mem = kvm_alloc_slot(kml);
 809    mem->memory_size = size;
 810    mem->start_addr = start_addr;
 811    mem->ram = ram;
 812    mem->flags = kvm_mem_flags(mr);
 813
 814    err = kvm_set_user_memory_region(kml, mem, true);
 815    if (err) {
 816        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
 817                strerror(-err));
 818        abort();
 819    }
 820}
 821
 822static void kvm_region_add(MemoryListener *listener,
 823                           MemoryRegionSection *section)
 824{
 825    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 826
 827    memory_region_ref(section->mr);
 828    kvm_set_phys_mem(kml, section, true);
 829}
 830
 831static void kvm_region_del(MemoryListener *listener,
 832                           MemoryRegionSection *section)
 833{
 834    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 835
 836    kvm_set_phys_mem(kml, section, false);
 837    memory_region_unref(section->mr);
 838}
 839
 840static void kvm_log_sync(MemoryListener *listener,
 841                         MemoryRegionSection *section)
 842{
 843    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 844    int r;
 845
 846    r = kvm_physical_sync_dirty_bitmap(kml, section);
 847    if (r < 0) {
 848        abort();
 849    }
 850}
 851
 852static void kvm_mem_ioeventfd_add(MemoryListener *listener,
 853                                  MemoryRegionSection *section,
 854                                  bool match_data, uint64_t data,
 855                                  EventNotifier *e)
 856{
 857    int fd = event_notifier_get_fd(e);
 858    int r;
 859
 860    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
 861                               data, true, int128_get64(section->size),
 862                               match_data);
 863    if (r < 0) {
 864        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
 865                __func__, strerror(-r));
 866        abort();
 867    }
 868}
 869
 870static void kvm_mem_ioeventfd_del(MemoryListener *listener,
 871                                  MemoryRegionSection *section,
 872                                  bool match_data, uint64_t data,
 873                                  EventNotifier *e)
 874{
 875    int fd = event_notifier_get_fd(e);
 876    int r;
 877
 878    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
 879                               data, false, int128_get64(section->size),
 880                               match_data);
 881    if (r < 0) {
 882        abort();
 883    }
 884}
 885
 886static void kvm_io_ioeventfd_add(MemoryListener *listener,
 887                                 MemoryRegionSection *section,
 888                                 bool match_data, uint64_t data,
 889                                 EventNotifier *e)
 890{
 891    int fd = event_notifier_get_fd(e);
 892    int r;
 893
 894    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
 895                              data, true, int128_get64(section->size),
 896                              match_data);
 897    if (r < 0) {
 898        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
 899                __func__, strerror(-r));
 900        abort();
 901    }
 902}
 903
 904static void kvm_io_ioeventfd_del(MemoryListener *listener,
 905                                 MemoryRegionSection *section,
 906                                 bool match_data, uint64_t data,
 907                                 EventNotifier *e)
 908
 909{
 910    int fd = event_notifier_get_fd(e);
 911    int r;
 912
 913    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
 914                              data, false, int128_get64(section->size),
 915                              match_data);
 916    if (r < 0) {
 917        abort();
 918    }
 919}
 920
 921void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
 922                                  AddressSpace *as, int as_id)
 923{
 924    int i;
 925
 926    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
 927    kml->as_id = as_id;
 928
 929    for (i = 0; i < s->nr_slots; i++) {
 930        kml->slots[i].slot = i;
 931    }
 932
 933    kml->listener.region_add = kvm_region_add;
 934    kml->listener.region_del = kvm_region_del;
 935    kml->listener.log_start = kvm_log_start;
 936    kml->listener.log_stop = kvm_log_stop;
 937    kml->listener.log_sync = kvm_log_sync;
 938    kml->listener.priority = 10;
 939
 940    memory_listener_register(&kml->listener, as);
 941}
 942
 943static MemoryListener kvm_io_listener = {
 944    .eventfd_add = kvm_io_ioeventfd_add,
 945    .eventfd_del = kvm_io_ioeventfd_del,
 946    .priority = 10,
 947};
 948
 949int kvm_set_irq(KVMState *s, int irq, int level)
 950{
 951    struct kvm_irq_level event;
 952    int ret;
 953
 954    assert(kvm_async_interrupts_enabled());
 955
 956    event.level = level;
 957    event.irq = irq;
 958    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
 959    if (ret < 0) {
 960        perror("kvm_set_irq");
 961        abort();
 962    }
 963
 964    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
 965}
 966
 967#ifdef KVM_CAP_IRQ_ROUTING
 968typedef struct KVMMSIRoute {
 969    struct kvm_irq_routing_entry kroute;
 970    QTAILQ_ENTRY(KVMMSIRoute) entry;
 971} KVMMSIRoute;
 972
 973static void set_gsi(KVMState *s, unsigned int gsi)
 974{
 975    set_bit(gsi, s->used_gsi_bitmap);
 976}
 977
 978static void clear_gsi(KVMState *s, unsigned int gsi)
 979{
 980    clear_bit(gsi, s->used_gsi_bitmap);
 981}
 982
 983void kvm_init_irq_routing(KVMState *s)
 984{
 985    int gsi_count, i;
 986
 987    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
 988    if (gsi_count > 0) {
 989        /* Round up so we can search ints using ffs */
 990        s->used_gsi_bitmap = bitmap_new(gsi_count);
 991        s->gsi_count = gsi_count;
 992    }
 993
 994    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
 995    s->nr_allocated_irq_routes = 0;
 996
 997    if (!kvm_direct_msi_allowed) {
 998        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
 999            QTAILQ_INIT(&s->msi_hashtab[i]);
1000        }
1001    }
1002
1003    kvm_arch_init_irq_routing(s);
1004}
1005
1006void kvm_irqchip_commit_routes(KVMState *s)
1007{
1008    int ret;
1009
1010    if (kvm_gsi_direct_mapping()) {
1011        return;
1012    }
1013
1014    if (!kvm_gsi_routing_enabled()) {
1015        return;
1016    }
1017
1018    s->irq_routes->flags = 0;
1019    trace_kvm_irqchip_commit_routes();
1020    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1021    assert(ret == 0);
1022}
1023
1024static void kvm_add_routing_entry(KVMState *s,
1025                                  struct kvm_irq_routing_entry *entry)
1026{
1027    struct kvm_irq_routing_entry *new;
1028    int n, size;
1029
1030    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1031        n = s->nr_allocated_irq_routes * 2;
1032        if (n < 64) {
1033            n = 64;
1034        }
1035        size = sizeof(struct kvm_irq_routing);
1036        size += n * sizeof(*new);
1037        s->irq_routes = g_realloc(s->irq_routes, size);
1038        s->nr_allocated_irq_routes = n;
1039    }
1040    n = s->irq_routes->nr++;
1041    new = &s->irq_routes->entries[n];
1042
1043    *new = *entry;
1044
1045    set_gsi(s, entry->gsi);
1046}
1047
1048static int kvm_update_routing_entry(KVMState *s,
1049                                    struct kvm_irq_routing_entry *new_entry)
1050{
1051    struct kvm_irq_routing_entry *entry;
1052    int n;
1053
1054    for (n = 0; n < s->irq_routes->nr; n++) {
1055        entry = &s->irq_routes->entries[n];
1056        if (entry->gsi != new_entry->gsi) {
1057            continue;
1058        }
1059
 1060        if (!memcmp(entry, new_entry, sizeof *entry)) {
1061            return 0;
1062        }
1063
1064        *entry = *new_entry;
1065
1066        return 0;
1067    }
1068
1069    return -ESRCH;
1070}
1071
1072void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1073{
1074    struct kvm_irq_routing_entry e = {};
1075
1076    assert(pin < s->gsi_count);
1077
1078    e.gsi = irq;
1079    e.type = KVM_IRQ_ROUTING_IRQCHIP;
1080    e.flags = 0;
1081    e.u.irqchip.irqchip = irqchip;
1082    e.u.irqchip.pin = pin;
1083    kvm_add_routing_entry(s, &e);
1084}
1085
1086void kvm_irqchip_release_virq(KVMState *s, int virq)
1087{
1088    struct kvm_irq_routing_entry *e;
1089    int i;
1090
1091    if (kvm_gsi_direct_mapping()) {
1092        return;
1093    }
1094
1095    for (i = 0; i < s->irq_routes->nr; i++) {
1096        e = &s->irq_routes->entries[i];
1097        if (e->gsi == virq) {
1098            s->irq_routes->nr--;
1099            *e = s->irq_routes->entries[s->irq_routes->nr];
1100        }
1101    }
1102    clear_gsi(s, virq);
1103    kvm_arch_release_virq_post(virq);
1104    trace_kvm_irqchip_release_virq(virq);
1105}
1106
1107static unsigned int kvm_hash_msi(uint32_t data)
1108{
1109    /* This is optimized for IA32 MSI layout. However, no other arch shall
1110     * repeat the mistake of not providing a direct MSI injection API. */
1111    return data & 0xff;
1112}
1113
1114static void kvm_flush_dynamic_msi_routes(KVMState *s)
1115{
1116    KVMMSIRoute *route, *next;
1117    unsigned int hash;
1118
1119    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1120        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1121            kvm_irqchip_release_virq(s, route->kroute.gsi);
1122            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1123            g_free(route);
1124        }
1125    }
1126}
1127
1128static int kvm_irqchip_get_virq(KVMState *s)
1129{
1130    int next_virq;
1131
1132    /*
 1133     * PIC and IOAPIC share the first 16 GSI numbers, so there are more
 1134     * available GSI numbers than IRQ route entries. Allocating a GSI
 1135     * number can therefore succeed even though a new route entry cannot be added.
1136     * When this happens, flush dynamic MSI entries to free IRQ route entries.
1137     */
1138    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
1139        kvm_flush_dynamic_msi_routes(s);
1140    }
1141
1142    /* Return the lowest unused GSI in the bitmap */
1143    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
1144    if (next_virq >= s->gsi_count) {
1145        return -ENOSPC;
1146    } else {
1147        return next_virq;
1148    }
1149}
1150
1151static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1152{
1153    unsigned int hash = kvm_hash_msi(msg.data);
1154    KVMMSIRoute *route;
1155
1156    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1157        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1158            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1159            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1160            return route;
1161        }
1162    }
1163    return NULL;
1164}
1165
1166int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1167{
1168    struct kvm_msi msi;
1169    KVMMSIRoute *route;
1170
1171    if (kvm_direct_msi_allowed) {
1172        msi.address_lo = (uint32_t)msg.address;
1173        msi.address_hi = msg.address >> 32;
1174        msi.data = le32_to_cpu(msg.data);
1175        msi.flags = 0;
1176        memset(msi.pad, 0, sizeof(msi.pad));
1177
1178        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
1179    }
1180
1181    route = kvm_lookup_msi_route(s, msg);
1182    if (!route) {
1183        int virq;
1184
1185        virq = kvm_irqchip_get_virq(s);
1186        if (virq < 0) {
1187            return virq;
1188        }
1189
1190        route = g_malloc0(sizeof(KVMMSIRoute));
1191        route->kroute.gsi = virq;
1192        route->kroute.type = KVM_IRQ_ROUTING_MSI;
1193        route->kroute.flags = 0;
1194        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1195        route->kroute.u.msi.address_hi = msg.address >> 32;
1196        route->kroute.u.msi.data = le32_to_cpu(msg.data);
1197
1198        kvm_add_routing_entry(s, &route->kroute);
1199        kvm_irqchip_commit_routes(s);
1200
1201        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1202                           entry);
1203    }
1204
1205    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1206
1207    return kvm_set_irq(s, route->kroute.gsi, 1);
1208}
1209
1210int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1211{
1212    struct kvm_irq_routing_entry kroute = {};
1213    int virq;
1214    MSIMessage msg = {0, 0};
1215
1216    if (pci_available && dev) {
1217        msg = pci_get_msi_message(dev, vector);
1218    }
1219
1220    if (kvm_gsi_direct_mapping()) {
1221        return kvm_arch_msi_data_to_gsi(msg.data);
1222    }
1223
1224    if (!kvm_gsi_routing_enabled()) {
1225        return -ENOSYS;
1226    }
1227
1228    virq = kvm_irqchip_get_virq(s);
1229    if (virq < 0) {
1230        return virq;
1231    }
1232
1233    kroute.gsi = virq;
1234    kroute.type = KVM_IRQ_ROUTING_MSI;
1235    kroute.flags = 0;
1236    kroute.u.msi.address_lo = (uint32_t)msg.address;
1237    kroute.u.msi.address_hi = msg.address >> 32;
1238    kroute.u.msi.data = le32_to_cpu(msg.data);
1239    if (pci_available && kvm_msi_devid_required()) {
1240        kroute.flags = KVM_MSI_VALID_DEVID;
1241        kroute.u.msi.devid = pci_requester_id(dev);
1242    }
1243    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1244        kvm_irqchip_release_virq(s, virq);
1245        return -EINVAL;
1246    }
1247
1248    trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
1249                                    vector, virq);
1250
1251    kvm_add_routing_entry(s, &kroute);
1252    kvm_arch_add_msi_route_post(&kroute, vector, dev);
1253    kvm_irqchip_commit_routes(s);
1254
1255    return virq;
1256}
1257
1258int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
1259                                 PCIDevice *dev)
1260{
1261    struct kvm_irq_routing_entry kroute = {};
1262
1263    if (kvm_gsi_direct_mapping()) {
1264        return 0;
1265    }
1266
1267    if (!kvm_irqchip_in_kernel()) {
1268        return -ENOSYS;
1269    }
1270
1271    kroute.gsi = virq;
1272    kroute.type = KVM_IRQ_ROUTING_MSI;
1273    kroute.flags = 0;
1274    kroute.u.msi.address_lo = (uint32_t)msg.address;
1275    kroute.u.msi.address_hi = msg.address >> 32;
1276    kroute.u.msi.data = le32_to_cpu(msg.data);
1277    if (pci_available && kvm_msi_devid_required()) {
1278        kroute.flags = KVM_MSI_VALID_DEVID;
1279        kroute.u.msi.devid = pci_requester_id(dev);
1280    }
1281    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1282        return -EINVAL;
1283    }
1284
1285    trace_kvm_irqchip_update_msi_route(virq);
1286
1287    return kvm_update_routing_entry(s, &kroute);
1288}
1289
1290static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
1291                                    bool assign)
1292{
1293    struct kvm_irqfd irqfd = {
1294        .fd = fd,
1295        .gsi = virq,
1296        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
1297    };
1298
1299    if (rfd != -1) {
1300        irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
1301        irqfd.resamplefd = rfd;
1302    }
1303
1304    if (!kvm_irqfds_enabled()) {
1305        return -ENOSYS;
1306    }
1307
1308    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
1309}
1310
1311int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1312{
1313    struct kvm_irq_routing_entry kroute = {};
1314    int virq;
1315
1316    if (!kvm_gsi_routing_enabled()) {
1317        return -ENOSYS;
1318    }
1319
1320    virq = kvm_irqchip_get_virq(s);
1321    if (virq < 0) {
1322        return virq;
1323    }
1324
1325    kroute.gsi = virq;
1326    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
1327    kroute.flags = 0;
1328    kroute.u.adapter.summary_addr = adapter->summary_addr;
1329    kroute.u.adapter.ind_addr = adapter->ind_addr;
1330    kroute.u.adapter.summary_offset = adapter->summary_offset;
1331    kroute.u.adapter.ind_offset = adapter->ind_offset;
1332    kroute.u.adapter.adapter_id = adapter->adapter_id;
1333
1334    kvm_add_routing_entry(s, &kroute);
1335
1336    return virq;
1337}
1338
1339int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1340{
1341    struct kvm_irq_routing_entry kroute = {};
1342    int virq;
1343
1344    if (!kvm_gsi_routing_enabled()) {
1345        return -ENOSYS;
1346    }
1347    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
1348        return -ENOSYS;
1349    }
1350    virq = kvm_irqchip_get_virq(s);
1351    if (virq < 0) {
1352        return virq;
1353    }
1354
1355    kroute.gsi = virq;
1356    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
1357    kroute.flags = 0;
1358    kroute.u.hv_sint.vcpu = vcpu;
1359    kroute.u.hv_sint.sint = sint;
1360
1361    kvm_add_routing_entry(s, &kroute);
1362    kvm_irqchip_commit_routes(s);
1363
1364    return virq;
1365}
1366
1367#else /* !KVM_CAP_IRQ_ROUTING */
1368
1369void kvm_init_irq_routing(KVMState *s)
1370{
1371}
1372
1373void kvm_irqchip_release_virq(KVMState *s, int virq)
1374{
1375}
1376
1377int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1378{
1379    abort();
1380}
1381
1382int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1383{
1384    return -ENOSYS;
1385}
1386
1387int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1388{
1389    return -ENOSYS;
1390}
1391
1392int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1393{
1394    return -ENOSYS;
1395}
1396
1397static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
1398{
1399    abort();
1400}
1401
1402int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
1403{
1404    return -ENOSYS;
1405}
1406#endif /* !KVM_CAP_IRQ_ROUTING */
1407
1408int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1409                                       EventNotifier *rn, int virq)
1410{
1411    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
1412           rn ? event_notifier_get_fd(rn) : -1, virq, true);
1413}
1414
1415int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1416                                          int virq)
1417{
1418    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
1419           false);
1420}
1421
1422int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
1423                                   EventNotifier *rn, qemu_irq irq)
1424{
1425    gpointer key, gsi;
1426    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1427
1428    if (!found) {
1429        return -ENXIO;
1430    }
1431    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
1432}
1433
1434int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
1435                                      qemu_irq irq)
1436{
1437    gpointer key, gsi;
1438    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1439
1440    if (!found) {
1441        return -ENXIO;
1442    }
1443    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
1444}
1445
1446void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
1447{
1448    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
1449}
1450
1451static void kvm_irqchip_create(MachineState *machine, KVMState *s)
1452{
1453    int ret;
1454
1455    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
1456        ;
1457    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
1458        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
1459        if (ret < 0) {
1460            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
1461            exit(1);
1462        }
1463    } else {
1464        return;
1465    }
1466
 1467    /* First probe and see if there's an arch-specific hook to create the
1468     * in-kernel irqchip for us */
1469    ret = kvm_arch_irqchip_create(machine, s);
1470    if (ret == 0) {
1471        if (machine_kernel_irqchip_split(machine)) {
 1472            fprintf(stderr, "Split IRQ chip mode not supported.\n");
1473            exit(1);
1474        } else {
1475            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
1476        }
1477    }
1478    if (ret < 0) {
1479        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
1480        exit(1);
1481    }
1482
1483    kvm_kernel_irqchip = true;
1484    /* If we have an in-kernel IRQ chip then we must have asynchronous
1485     * interrupt delivery (though the reverse is not necessarily true)
1486     */
1487    kvm_async_interrupts_allowed = true;
1488    kvm_halt_in_kernel_allowed = true;
1489
1490    kvm_init_irq_routing(s);
1491
1492    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
1493}
1494
1495/* Find number of supported CPUs using the recommended
1496 * procedure from the kernel API documentation to cope with
1497 * older kernels that may be missing capabilities.
1498 */
1499static int kvm_recommended_vcpus(KVMState *s)
1500{
1501    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
1502    return (ret) ? ret : 4;
1503}
1504
1505static int kvm_max_vcpus(KVMState *s)
1506{
1507    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
1508    return (ret) ? ret : kvm_recommended_vcpus(s);
1509}
1510
1511static int kvm_max_vcpu_id(KVMState *s)
1512{
1513    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
1514    return (ret) ? ret : kvm_max_vcpus(s);
1515}
1516
1517bool kvm_vcpu_id_is_valid(int vcpu_id)
1518{
1519    KVMState *s = KVM_STATE(current_machine->accelerator);
1520    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
1521}
1522
1523static int kvm_init(MachineState *ms)
1524{
1525    MachineClass *mc = MACHINE_GET_CLASS(ms);
1526    static const char upgrade_note[] =
1527        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
1528        "(see http://sourceforge.net/projects/kvm).\n";
1529    struct {
1530        const char *name;
1531        int num;
1532    } num_cpus[] = {
1533        { "SMP",          smp_cpus },
1534        { "hotpluggable", max_cpus },
1535        { NULL, }
1536    }, *nc = num_cpus;
1537    int soft_vcpus_limit, hard_vcpus_limit;
1538    KVMState *s;
1539    const KVMCapabilityInfo *missing_cap;
1540    int ret;
1541    int type = 0;
1542    const char *kvm_type;
1543
1544    s = KVM_STATE(ms->accelerator);
1545
1546    /*
1547     * On systems where the kernel can support different base page
1548     * sizes, host page size may be different from TARGET_PAGE_SIZE,
1549     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
1550     * page size for the system though.
1551     */
1552    assert(TARGET_PAGE_SIZE <= getpagesize());
1553
1554    s->sigmask_len = 8;
1555
1556#ifdef KVM_CAP_SET_GUEST_DEBUG
1557    QTAILQ_INIT(&s->kvm_sw_breakpoints);
1558#endif
1559    QLIST_INIT(&s->kvm_parked_vcpus);
1560    s->vmfd = -1;
1561    s->fd = qemu_open("/dev/kvm", O_RDWR);
1562    if (s->fd == -1) {
1563        fprintf(stderr, "Could not access KVM kernel module: %m\n");
1564        ret = -errno;
1565        goto err;
1566    }
1567
1568    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
1569    if (ret < KVM_API_VERSION) {
1570        if (ret >= 0) {
1571            ret = -EINVAL;
1572        }
1573        fprintf(stderr, "kvm version too old\n");
1574        goto err;
1575    }
1576
1577    if (ret > KVM_API_VERSION) {
1578        ret = -EINVAL;
1579        fprintf(stderr, "kvm version not supported\n");
1580        goto err;
1581    }
1582
1583    kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
1584    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
1585
1586    /* If unspecified, use the default value */
1587    if (!s->nr_slots) {
1588        s->nr_slots = 32;
1589    }
1590
1591    kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
1592    if (mc->kvm_type) {
1593        type = mc->kvm_type(kvm_type);
1594    } else if (kvm_type) {
1595        ret = -EINVAL;
1596        fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
1597        goto err;
1598    }
1599
1600    do {
1601        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
1602    } while (ret == -EINTR);
1603
1604    if (ret < 0) {
1605        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
1606                strerror(-ret));
1607
1608#ifdef TARGET_S390X
1609        if (ret == -EINVAL) {
1610            fprintf(stderr,
1611                    "Host kernel setup problem detected. Please verify:\n");
1612            fprintf(stderr, "- for kernels supporting the switch_amode or"
1613                    " user_mode parameters, whether\n");
1614            fprintf(stderr,
1615                    "  user space is running in primary address space\n");
1616            fprintf(stderr,
1617                    "- for kernels supporting the vm.allocate_pgste sysctl, "
1618                    "whether it is enabled\n");
1619        }
1620#endif
1621        goto err;
1622    }
1623
1624    s->vmfd = ret;
1625
1626    /* check the vcpu limits */
1627    soft_vcpus_limit = kvm_recommended_vcpus(s);
1628    hard_vcpus_limit = kvm_max_vcpus(s);
1629
1630    while (nc->name) {
1631        if (nc->num > soft_vcpus_limit) {
1632            warn_report("Number of %s cpus requested (%d) exceeds "
1633                        "the recommended cpus supported by KVM (%d)",
1634                        nc->name, nc->num, soft_vcpus_limit);
1635
1636            if (nc->num > hard_vcpus_limit) {
1637                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
1638                        "the maximum cpus supported by KVM (%d)\n",
1639                        nc->name, nc->num, hard_vcpus_limit);
1640                exit(1);
1641            }
1642        }
1643        nc++;
1644    }
1645
1646    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
1647    if (!missing_cap) {
1648        missing_cap =
1649            kvm_check_extension_list(s, kvm_arch_required_capabilities);
1650    }
1651    if (missing_cap) {
1652        ret = -EINVAL;
1653        fprintf(stderr, "kvm does not support %s\n%s",
1654                missing_cap->name, upgrade_note);
1655        goto err;
1656    }
1657
1658    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
1659    s->coalesced_pio = s->coalesced_mmio &&
1660                       kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
1661
1662#ifdef KVM_CAP_VCPU_EVENTS
1663    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
1664#endif
1665
1666    s->robust_singlestep =
1667        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
1668
1669#ifdef KVM_CAP_DEBUGREGS
1670    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
1671#endif
1672
1673#ifdef KVM_CAP_IRQ_ROUTING
1674    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
1675#endif
1676
1677    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
1678
1679    s->irq_set_ioctl = KVM_IRQ_LINE;
1680    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
1681        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
1682    }
1683
1684    kvm_readonly_mem_allowed =
1685        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
1686
1687    kvm_eventfds_allowed =
1688        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
1689
1690    kvm_irqfds_allowed =
1691        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
1692
1693    kvm_resamplefds_allowed =
1694        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
1695
1696    kvm_vm_attributes_allowed =
1697        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
1698
1699    kvm_ioeventfd_any_length_allowed =
1700        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
1701
1702    kvm_state = s;
1703
1704    /*
 1705     * If a memory encryption object is specified, initialize the memory
 1706     * encryption context.
1707     */
1708    if (ms->memory_encryption) {
1709        kvm_state->memcrypt_handle = sev_guest_init(ms->memory_encryption);
1710        if (!kvm_state->memcrypt_handle) {
1711            ret = -1;
1712            goto err;
1713        }
1714
1715        kvm_state->memcrypt_encrypt_data = sev_encrypt_data;
1716    }
1717
1718    ret = kvm_arch_init(ms, s);
1719    if (ret < 0) {
1720        goto err;
1721    }
1722
1723    if (machine_kernel_irqchip_allowed(ms)) {
1724        kvm_irqchip_create(ms, s);
1725    }
1726
1727    if (kvm_eventfds_allowed) {
1728        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
1729        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
1730    }
1731    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
1732    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
1733
1734    kvm_memory_listener_register(s, &s->memory_listener,
1735                                 &address_space_memory, 0);
1736    memory_listener_register(&kvm_io_listener,
1737                             &address_space_io);
1738    memory_listener_register(&kvm_coalesced_pio_listener,
1739                             &address_space_io);
1740
1741    s->many_ioeventfds = kvm_check_many_ioeventfds();
1742
1743    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
1744    if (!s->sync_mmu) {
1745        qemu_balloon_inhibit(true);
1746    }
1747
1748    return 0;
1749
1750err:
1751    assert(ret < 0);
1752    if (s->vmfd >= 0) {
1753        close(s->vmfd);
1754    }
1755    if (s->fd != -1) {
1756        close(s->fd);
1757    }
1758    g_free(s->memory_listener.slots);
1759
1760    return ret;
1761}
1762
1763void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
1764{
1765    s->sigmask_len = sigmask_len;
1766}
1767
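/* Complete a KVM_EXIT_IO exit: `data` points at `count` items of `size`
 * bytes each, which are replayed one at a time against the I/O address
 * space in the direction the guest requested. */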
1768static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
1769                          int size, uint32_t count)
1770{
1771    int i;
1772    uint8_t *ptr = data;
1773
1774    for (i = 0; i < count; i++) {
1775        address_space_rw(&address_space_io, port, attrs,
1776                         ptr, size,
1777                         direction == KVM_EXIT_IO_OUT);
1778        ptr += size;
1779    }
1780}
1781
1782static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
1783{
1784    fprintf(stderr, "KVM internal error. Suberror: %d\n",
1785            run->internal.suberror);
1786
1787    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
1788        int i;
1789
1790        for (i = 0; i < run->internal.ndata; ++i) {
1791            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
1792                    i, (uint64_t)run->internal.data[i]);
1793        }
1794    }
1795    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
1796        fprintf(stderr, "emulation failure\n");
1797        if (!kvm_arch_stop_on_emulation_error(cpu)) {
1798            cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
1799            return EXCP_INTERRUPT;
1800        }
1801    }
1802    /* FIXME: Should trigger a qmp message to let management know
1803     * something went wrong.
1804     */
1805    return -1;
1806}
1807
1808void kvm_flush_coalesced_mmio_buffer(void)
1809{
1810    KVMState *s = kvm_state;
1811
1812    if (s->coalesced_flush_in_progress) {
1813        return;
1814    }
1815
1816    s->coalesced_flush_in_progress = true;
1817
1818    if (s->coalesced_mmio_ring) {
1819        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
1820        while (ring->first != ring->last) {
1821            struct kvm_coalesced_mmio *ent;
1822
1823            ent = &ring->coalesced_mmio[ring->first];
1824
1825            if (ent->pio == 1) {
1826                address_space_rw(&address_space_io, ent->phys_addr,
1827                                 MEMTXATTRS_UNSPECIFIED, ent->data,
1828                                 ent->len, true);
1829            } else {
1830                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
1831            }
1832            smp_wmb();
1833            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
1834        }
1835    }
1836
1837    s->coalesced_flush_in_progress = false;
1838}
1839
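/*
 * Register synchronization helpers.  cpu->vcpu_dirty tracks whether the
 * QEMU-side register copy is newer than the kernel's: a dirty vCPU has its
 * state pushed with kvm_arch_put_registers() before the next KVM_RUN, a
 * clean one is refreshed with kvm_arch_get_registers().  The do_* variants
 * run on the target vCPU thread via run_on_cpu().
 */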
1840static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
1841{
1842    if (!cpu->vcpu_dirty) {
1843        kvm_arch_get_registers(cpu);
1844        cpu->vcpu_dirty = true;
1845    }
1846}
1847
1848void kvm_cpu_synchronize_state(CPUState *cpu)
1849{
1850    if (!cpu->vcpu_dirty) {
1851        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
1852    }
1853}
1854
1855static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
1856{
1857    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
1858    cpu->vcpu_dirty = false;
1859}
1860
1861void kvm_cpu_synchronize_post_reset(CPUState *cpu)
1862{
1863    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
1864}
1865
1866static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
1867{
1868    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
1869    cpu->vcpu_dirty = false;
1870}
1871
1872void kvm_cpu_synchronize_post_init(CPUState *cpu)
1873{
1874    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
1875}
1876
1877static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
1878{
1879    cpu->vcpu_dirty = true;
1880}
1881
1882void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
1883{
1884    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
1885}
1886
1887#ifdef KVM_HAVE_MCE_INJECTION
1888static __thread void *pending_sigbus_addr;
1889static __thread int pending_sigbus_code;
1890static __thread bool have_sigbus_pending;
1891#endif
1892
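/*
 * Kick a vCPU out of KVM_RUN.  With KVM_CAP_IMMEDIATE_EXIT setting the
 * shared immediate_exit flag is enough; otherwise the legacy SIG_IPI
 * self-signal path in qemu_cpu_kick_self() is used.
 */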
1893static void kvm_cpu_kick(CPUState *cpu)
1894{
1895    atomic_set(&cpu->kvm_run->immediate_exit, 1);
1896}
1897
1898static void kvm_cpu_kick_self(void)
1899{
1900    if (kvm_immediate_exit) {
1901        kvm_cpu_kick(current_cpu);
1902    } else {
1903        qemu_cpu_kick_self();
1904    }
1905}
1906
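/*
 * Consume any pending SIG_IPI so that a kick delivered while outside
 * KVM_RUN is not left dangling.  With immediate_exit there is nothing to
 * eat: the flag is simply cleared, and the smp_wmb() pairs with the
 * smp_rmb() before KVM_RUN in kvm_cpu_exec().
 */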
1907static void kvm_eat_signals(CPUState *cpu)
1908{
1909    struct timespec ts = { 0, 0 };
1910    siginfo_t siginfo;
1911    sigset_t waitset;
1912    sigset_t chkset;
1913    int r;
1914
1915    if (kvm_immediate_exit) {
1916        atomic_set(&cpu->kvm_run->immediate_exit, 0);
1917        /* Write kvm_run->immediate_exit before the cpu->exit_request
1918         * write in kvm_cpu_exec.
1919         */
1920        smp_wmb();
1921        return;
1922    }
1923
1924    sigemptyset(&waitset);
1925    sigaddset(&waitset, SIG_IPI);
1926
1927    do {
1928        r = sigtimedwait(&waitset, &siginfo, &ts);
1929        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
1930            perror("sigtimedwait");
1931            exit(1);
1932        }
1933
1934        r = sigpending(&chkset);
1935        if (r == -1) {
1936            perror("sigpending");
1937            exit(1);
1938        }
1939    } while (sigismember(&chkset, SIG_IPI));
1940}
1941
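/*
 * Main per-vCPU execution loop: push registers if dirty, issue KVM_RUN
 * outside the BQL, then dispatch on the exit reason.  I/O, MMIO and system
 * events are handled here; everything else goes to kvm_arch_handle_exit().
 * ret == 0 keeps looping, a positive EXCP_* value returns control to the
 * caller, and a negative value stops the VM in RUN_STATE_INTERNAL_ERROR.
 */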
1942int kvm_cpu_exec(CPUState *cpu)
1943{
1944    struct kvm_run *run = cpu->kvm_run;
1945    int ret, run_ret;
1946
1947    DPRINTF("kvm_cpu_exec()\n");
1948
1949    if (kvm_arch_process_async_events(cpu)) {
1950        atomic_set(&cpu->exit_request, 0);
1951        return EXCP_HLT;
1952    }
1953
1954    qemu_mutex_unlock_iothread();
1955    cpu_exec_start(cpu);
1956
1957    do {
1958        MemTxAttrs attrs;
1959
1960        if (cpu->vcpu_dirty) {
1961            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
1962            cpu->vcpu_dirty = false;
1963        }
1964
1965        kvm_arch_pre_run(cpu, run);
1966        if (atomic_read(&cpu->exit_request)) {
1967            DPRINTF("interrupt exit requested\n");
1968            /*
1969             * KVM requires us to re-enter the kernel after I/O exits to
1970             * complete instruction emulation.  This self-signal ensures
1971             * that we leave the kernel again as soon as possible.
1972             */
1973            kvm_cpu_kick_self();
1974        }
1975
1976        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
1977         * Matching barrier in kvm_eat_signals.
1978         */
1979        smp_rmb();
1980
1981        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
1982
1983        attrs = kvm_arch_post_run(cpu, run);
1984
1985#ifdef KVM_HAVE_MCE_INJECTION
1986        if (unlikely(have_sigbus_pending)) {
1987            qemu_mutex_lock_iothread();
1988            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
1989                                    pending_sigbus_addr);
1990            have_sigbus_pending = false;
1991            qemu_mutex_unlock_iothread();
1992        }
1993#endif
1994
1995        if (run_ret < 0) {
1996            if (run_ret == -EINTR || run_ret == -EAGAIN) {
1997                DPRINTF("io window exit\n");
1998                kvm_eat_signals(cpu);
1999                ret = EXCP_INTERRUPT;
2000                break;
2001            }
2002            fprintf(stderr, "error: kvm run failed %s\n",
2003                    strerror(-run_ret));
2004#ifdef TARGET_PPC
2005            if (run_ret == -EBUSY) {
2006                fprintf(stderr,
2007                        "This is probably because your SMT is enabled.\n"
2008                        "VCPU can only run on primary threads with all "
2009                        "secondary threads offline.\n");
2010            }
2011#endif
2012            ret = -1;
2013            break;
2014        }
2015
2016        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
2017        switch (run->exit_reason) {
2018        case KVM_EXIT_IO:
2019            DPRINTF("handle_io\n");
2020            /* Called outside BQL */
2021            kvm_handle_io(run->io.port, attrs,
2022                          (uint8_t *)run + run->io.data_offset,
2023                          run->io.direction,
2024                          run->io.size,
2025                          run->io.count);
2026            ret = 0;
2027            break;
2028        case KVM_EXIT_MMIO:
2029            DPRINTF("handle_mmio\n");
2030            /* Called outside BQL */
2031            address_space_rw(&address_space_memory,
2032                             run->mmio.phys_addr, attrs,
2033                             run->mmio.data,
2034                             run->mmio.len,
2035                             run->mmio.is_write);
2036            ret = 0;
2037            break;
2038        case KVM_EXIT_IRQ_WINDOW_OPEN:
2039            DPRINTF("irq_window_open\n");
2040            ret = EXCP_INTERRUPT;
2041            break;
2042        case KVM_EXIT_SHUTDOWN:
2043            DPRINTF("shutdown\n");
2044            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2045            ret = EXCP_INTERRUPT;
2046            break;
2047        case KVM_EXIT_UNKNOWN:
2048            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
2049                    (uint64_t)run->hw.hardware_exit_reason);
2050            ret = -1;
2051            break;
2052        case KVM_EXIT_INTERNAL_ERROR:
2053            ret = kvm_handle_internal_error(cpu, run);
2054            break;
2055        case KVM_EXIT_SYSTEM_EVENT:
2056            switch (run->system_event.type) {
2057            case KVM_SYSTEM_EVENT_SHUTDOWN:
2058                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
2059                ret = EXCP_INTERRUPT;
2060                break;
2061            case KVM_SYSTEM_EVENT_RESET:
2062                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2063                ret = EXCP_INTERRUPT;
2064                break;
2065            case KVM_SYSTEM_EVENT_CRASH:
2066                kvm_cpu_synchronize_state(cpu);
2067                qemu_mutex_lock_iothread();
2068                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
2069                qemu_mutex_unlock_iothread();
2070                ret = 0;
2071                break;
2072            default:
2073                DPRINTF("kvm_arch_handle_exit\n");
2074                ret = kvm_arch_handle_exit(cpu, run);
2075                break;
2076            }
2077            break;
2078        default:
2079            DPRINTF("kvm_arch_handle_exit\n");
2080            ret = kvm_arch_handle_exit(cpu, run);
2081            break;
2082        }
2083    } while (ret == 0);
2084
2085    cpu_exec_end(cpu);
2086    qemu_mutex_lock_iothread();
2087
2088    if (ret < 0) {
2089        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
2090        vm_stop(RUN_STATE_INTERNAL_ERROR);
2091    }
2092
2093    atomic_set(&cpu->exit_request, 0);
2094    return ret;
2095}
2096
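/*
 * Thin ioctl wrappers for the /dev/kvm, VM, vCPU and device file
 * descriptors.  They accept at most one pointer argument, trace the call,
 * and convert the -1/errno convention into a negative errno return value,
 * which is what callers throughout this file expect.
 */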
2097int kvm_ioctl(KVMState *s, int type, ...)
2098{
2099    int ret;
2100    void *arg;
2101    va_list ap;
2102
2103    va_start(ap, type);
2104    arg = va_arg(ap, void *);
2105    va_end(ap);
2106
2107    trace_kvm_ioctl(type, arg);
2108    ret = ioctl(s->fd, type, arg);
2109    if (ret == -1) {
2110        ret = -errno;
2111    }
2112    return ret;
2113}
2114
2115int kvm_vm_ioctl(KVMState *s, int type, ...)
2116{
2117    int ret;
2118    void *arg;
2119    va_list ap;
2120
2121    va_start(ap, type);
2122    arg = va_arg(ap, void *);
2123    va_end(ap);
2124
2125    trace_kvm_vm_ioctl(type, arg);
2126    ret = ioctl(s->vmfd, type, arg);
2127    if (ret == -1) {
2128        ret = -errno;
2129    }
2130    return ret;
2131}
2132
2133int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
2134{
2135    int ret;
2136    void *arg;
2137    va_list ap;
2138
2139    va_start(ap, type);
2140    arg = va_arg(ap, void *);
2141    va_end(ap);
2142
2143    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
2144    ret = ioctl(cpu->kvm_fd, type, arg);
2145    if (ret == -1) {
2146        ret = -errno;
2147    }
2148    return ret;
2149}
2150
2151int kvm_device_ioctl(int fd, int type, ...)
2152{
2153    int ret;
2154    void *arg;
2155    va_list ap;
2156
2157    va_start(ap, type);
2158    arg = va_arg(ap, void *);
2159    va_end(ap);
2160
2161    trace_kvm_device_ioctl(fd, type, arg);
2162    ret = ioctl(fd, type, arg);
2163    if (ret == -1) {
2164        ret = -errno;
2165    }
2166    return ret;
2167}
2168
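/*
 * Device attribute helpers built on KVM_HAS_DEVICE_ATTR and
 * KVM_GET/SET_DEVICE_ATTR.  The check helpers fold the kernel's
 * 0-on-success convention into a 0/1 result, while kvm_device_access()
 * reports failures through an Error object, e.g. (group/attr names and
 * variables are illustrative only):
 *
 *     uint32_t val;
 *     kvm_device_access(dev_fd, SOME_GROUP, SOME_ATTR, &val, false, &err);
 */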
2169int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
2170{
2171    int ret;
2172    struct kvm_device_attr attribute = {
2173        .group = group,
2174        .attr = attr,
2175    };
2176
2177    if (!kvm_vm_attributes_allowed) {
2178        return 0;
2179    }
2180
2181    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
2182    /* KVM returns 0 on success for KVM_HAS_DEVICE_ATTR */
2183    return ret ? 0 : 1;
2184}
2185
2186int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
2187{
2188    struct kvm_device_attr attribute = {
2189        .group = group,
2190        .attr = attr,
2191        .flags = 0,
2192    };
2193
2194    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
2195}
2196
2197int kvm_device_access(int fd, int group, uint64_t attr,
2198                      void *val, bool write, Error **errp)
2199{
2200    struct kvm_device_attr kvmattr;
2201    int err;
2202
2203    kvmattr.flags = 0;
2204    kvmattr.group = group;
2205    kvmattr.attr = attr;
2206    kvmattr.addr = (uintptr_t)val;
2207
2208    err = kvm_device_ioctl(fd,
2209                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
2210                           &kvmattr);
2211    if (err < 0) {
2212        error_setg_errno(errp, -err,
2213                         "KVM_%s_DEVICE_ATTR failed: Group %d "
2214                         "attr 0x%016" PRIx64,
2215                         write ? "SET" : "GET", group, attr);
2216    }
2217    return err;
2218}
2219
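/*
 * Accessors for capability state probed once in kvm_init() and cached in
 * KVMState; kvm_has_gsi_routing() and kvm_arm_supports_user_irq() query
 * the kernel directly instead.
 */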
2220bool kvm_has_sync_mmu(void)
2221{
2222    return kvm_state->sync_mmu;
2223}
2224
2225int kvm_has_vcpu_events(void)
2226{
2227    return kvm_state->vcpu_events;
2228}
2229
2230int kvm_has_robust_singlestep(void)
2231{
2232    return kvm_state->robust_singlestep;
2233}
2234
2235int kvm_has_debugregs(void)
2236{
2237    return kvm_state->debugregs;
2238}
2239
2240int kvm_has_many_ioeventfds(void)
2241{
2242    if (!kvm_enabled()) {
2243        return 0;
2244    }
2245    return kvm_state->many_ioeventfds;
2246}
2247
2248int kvm_has_gsi_routing(void)
2249{
2250#ifdef KVM_CAP_IRQ_ROUTING
2251    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
2252#else
2253    return false;
2254#endif
2255}
2256
2257int kvm_has_intx_set_mask(void)
2258{
2259    return kvm_state->intx_set_mask;
2260}
2261
2262bool kvm_arm_supports_user_irq(void)
2263{
2264    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
2265}
2266
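/*
 * gdbstub support.  Software breakpoints are tracked per-VM in
 * kvm_sw_breakpoints with a use count, hardware breakpoints are delegated
 * to the architecture code, and every change is pushed to all vCPUs
 * through KVM_SET_GUEST_DEBUG in kvm_update_guest_debug().  Without
 * KVM_CAP_SET_GUEST_DEBUG the stubs below simply return -EINVAL.
 */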
2267#ifdef KVM_CAP_SET_GUEST_DEBUG
2268struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
2269                                                 target_ulong pc)
2270{
2271    struct kvm_sw_breakpoint *bp;
2272
2273    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
2274        if (bp->pc == pc) {
2275            return bp;
2276        }
2277    }
2278    return NULL;
2279}
2280
2281int kvm_sw_breakpoints_active(CPUState *cpu)
2282{
2283    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
2284}
2285
2286struct kvm_set_guest_debug_data {
2287    struct kvm_guest_debug dbg;
2288    int err;
2289};
2290
2291static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
2292{
2293    struct kvm_set_guest_debug_data *dbg_data =
2294        (struct kvm_set_guest_debug_data *) data.host_ptr;
2295
2296    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
2297                                   &dbg_data->dbg);
2298}
2299
2300int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2301{
2302    struct kvm_set_guest_debug_data data;
2303
2304    data.dbg.control = reinject_trap;
2305
2306    if (cpu->singlestep_enabled) {
2307        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
2308    }
2309    kvm_arch_update_guest_debug(cpu, &data.dbg);
2310
2311    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
2312               RUN_ON_CPU_HOST_PTR(&data));
2313    return data.err;
2314}
2315
2316int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2317                          target_ulong len, int type)
2318{
2319    struct kvm_sw_breakpoint *bp;
2320    int err;
2321
2322    if (type == GDB_BREAKPOINT_SW) {
2323        bp = kvm_find_sw_breakpoint(cpu, addr);
2324        if (bp) {
2325            bp->use_count++;
2326            return 0;
2327        }
2328
2329        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
2330        bp->pc = addr;
2331        bp->use_count = 1;
2332        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
2333        if (err) {
2334            g_free(bp);
2335            return err;
2336        }
2337
2338        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2339    } else {
2340        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
2341        if (err) {
2342            return err;
2343        }
2344    }
2345
2346    CPU_FOREACH(cpu) {
2347        err = kvm_update_guest_debug(cpu, 0);
2348        if (err) {
2349            return err;
2350        }
2351    }
2352    return 0;
2353}
2354
2355int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2356                          target_ulong len, int type)
2357{
2358    struct kvm_sw_breakpoint *bp;
2359    int err;
2360
2361    if (type == GDB_BREAKPOINT_SW) {
2362        bp = kvm_find_sw_breakpoint(cpu, addr);
2363        if (!bp) {
2364            return -ENOENT;
2365        }
2366
2367        if (bp->use_count > 1) {
2368            bp->use_count--;
2369            return 0;
2370        }
2371
2372        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
2373        if (err) {
2374            return err;
2375        }
2376
2377        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2378        g_free(bp);
2379    } else {
2380        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
2381        if (err) {
2382            return err;
2383        }
2384    }
2385
2386    CPU_FOREACH(cpu) {
2387        err = kvm_update_guest_debug(cpu, 0);
2388        if (err) {
2389            return err;
2390        }
2391    }
2392    return 0;
2393}
2394
2395void kvm_remove_all_breakpoints(CPUState *cpu)
2396{
2397    struct kvm_sw_breakpoint *bp, *next;
2398    KVMState *s = cpu->kvm_state;
2399    CPUState *tmpcpu;
2400
2401    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
2402        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
2403            /* Try harder to find a CPU that currently sees the breakpoint. */
2404            CPU_FOREACH(tmpcpu) {
2405                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
2406                    break;
2407                }
2408            }
2409        }
2410        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
2411        g_free(bp);
2412    }
2413    kvm_arch_remove_all_hw_breakpoints();
2414
2415    CPU_FOREACH(cpu) {
2416        kvm_update_guest_debug(cpu, 0);
2417    }
2418}
2419
2420#else /* !KVM_CAP_SET_GUEST_DEBUG */
2421
2422int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2423{
2424    return -EINVAL;
2425}
2426
2427int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2428                          target_ulong len, int type)
2429{
2430    return -EINVAL;
2431}
2432
2433int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2434                          target_ulong len, int type)
2435{
2436    return -EINVAL;
2437}
2438
2439void kvm_remove_all_breakpoints(CPUState *cpu)
2440{
2441}
2442#endif /* !KVM_CAP_SET_GUEST_DEBUG */
2443
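/*
 * vCPU signal setup.  SIG_IPI interrupts KVM_RUN on kernels without
 * KVM_CAP_IMMEDIATE_EXIT, so the mask that unblocks it during KVM_RUN is
 * installed via KVM_SET_SIGNAL_MASK; with immediate_exit the mask is set
 * with pthread_sigmask() and the SIG_IPI handler just sets the flag via
 * kvm_cpu_kick().  When MCE injection is supported, SIGBUS is unblocked as
 * well so machine checks are delivered to the vCPU thread directly.
 */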
2444static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
2445{
2446    KVMState *s = kvm_state;
2447    struct kvm_signal_mask *sigmask;
2448    int r;
2449
2450    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
2451
2452    sigmask->len = s->sigmask_len;
2453    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
2454    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
2455    g_free(sigmask);
2456
2457    return r;
2458}
2459
2460static void kvm_ipi_signal(int sig)
2461{
2462    if (current_cpu) {
2463        assert(kvm_immediate_exit);
2464        kvm_cpu_kick(current_cpu);
2465    }
2466}
2467
2468void kvm_init_cpu_signals(CPUState *cpu)
2469{
2470    int r;
2471    sigset_t set;
2472    struct sigaction sigact;
2473
2474    memset(&sigact, 0, sizeof(sigact));
2475    sigact.sa_handler = kvm_ipi_signal;
2476    sigaction(SIG_IPI, &sigact, NULL);
2477
2478    pthread_sigmask(SIG_BLOCK, NULL, &set);
2479#if defined KVM_HAVE_MCE_INJECTION
2480    sigdelset(&set, SIGBUS);
2481    pthread_sigmask(SIG_SETMASK, &set, NULL);
2482#endif
2483    sigdelset(&set, SIG_IPI);
2484    if (kvm_immediate_exit) {
2485        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
2486    } else {
2487        r = kvm_set_signal_mask(cpu, &set);
2488    }
2489    if (r) {
2490        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
2491        exit(1);
2492    }
2493}
2494
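/*
 * SIGBUS (machine check) forwarding.  The vCPU-thread handler only records
 * the pending error and requests an exit; kvm_cpu_exec() then injects it
 * under the BQL via kvm_arch_on_sigbus_vcpu().  Without MCE injection
 * support both handlers return 1 so the caller treats the SIGBUS as
 * unhandled.
 */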
2495/* Called asynchronously in VCPU thread.  */
2496int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
2497{
2498#ifdef KVM_HAVE_MCE_INJECTION
2499    if (have_sigbus_pending) {
2500        return 1;
2501    }
2502    have_sigbus_pending = true;
2503    pending_sigbus_addr = addr;
2504    pending_sigbus_code = code;
2505    atomic_set(&cpu->exit_request, 1);
2506    return 0;
2507#else
2508    return 1;
2509#endif
2510}
2511
2512/* Called synchronously (via signalfd) in main thread.  */
2513int kvm_on_sigbus(int code, void *addr)
2514{
2515#ifdef KVM_HAVE_MCE_INJECTION
2516    /* An action-required MCE kills the process if SIGBUS is blocked, and
2517     * SIGBUS is blocked in the I/O thread, where MCEs are handled via
2518     * signalfd.  Only action-optional MCEs can therefore reach this point.
2519     */
2520    assert(code != BUS_MCEERR_AR);
2521    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
2522    return 0;
2523#else
2524    return 1;
2525#endif
2526}
2527
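/*
 * In-kernel device creation (KVM_CREATE_DEVICE).  With 'test' set only
 * KVM_CREATE_DEVICE_TEST is requested, so the call reports whether the
 * device type is supported without instantiating it; otherwise the new
 * device fd is returned.
 */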
2528int kvm_create_device(KVMState *s, uint64_t type, bool test)
2529{
2530    int ret;
2531    struct kvm_create_device create_dev;
2532
2533    create_dev.type = type;
2534    create_dev.fd = -1;
2535    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
2536
2537    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
2538        return -ENOTSUP;
2539    }
2540
2541    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
2542    if (ret) {
2543        return ret;
2544    }
2545
2546    return test ? 0 : create_dev.fd;
2547}
2548
2549bool kvm_device_supported(int vmfd, uint64_t type)
2550{
2551    struct kvm_create_device create_dev = {
2552        .type = type,
2553        .fd = -1,
2554        .flags = KVM_CREATE_DEVICE_TEST,
2555    };
2556
2557    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
2558        return false;
2559    }
2560
2561    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
2562}
2563
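/*
 * ONE_REG accessors wrapping KVM_SET_ONE_REG/KVM_GET_ONE_REG.  Callers
 * pass a register ID and a pointer to a host buffer of matching width,
 * e.g. (the register ID is illustrative):
 *
 *     uint64_t val;
 *     if (kvm_get_one_reg(cs, SOME_REG_ID, &val) == 0) {
 *         ... use val ...
 *     }
 *
 * Failures are traced rather than printed; error handling stays with the
 * caller.
 */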
2564int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
2565{
2566    struct kvm_one_reg reg;
2567    int r;
2568
2569    reg.id = id;
2570    reg.addr = (uintptr_t) source;
2571    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
2572    if (r) {
2573        trace_kvm_failed_reg_set(id, strerror(-r));
2574    }
2575    return r;
2576}
2577
2578int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
2579{
2580    struct kvm_one_reg reg;
2581    int r;
2582
2583    reg.id = id;
2584    reg.addr = (uintptr_t) target;
2585    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
2586    if (r) {
2587        trace_kvm_failed_reg_get(id, strerror(-r));
2588    }
2589    return r;
2590}
2591
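/*
 * QOM glue: register the "kvm" accelerator type so that -accel kvm (or
 * -enable-kvm) reaches kvm_init() through AccelClass::init_machine.
 */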
2592static void kvm_accel_class_init(ObjectClass *oc, void *data)
2593{
2594    AccelClass *ac = ACCEL_CLASS(oc);
2595    ac->name = "KVM";
2596    ac->init_machine = kvm_init;
2597    ac->allowed = &kvm_allowed;
2598}
2599
2600static const TypeInfo kvm_accel_type = {
2601    .name = TYPE_KVM_ACCEL,
2602    .parent = TYPE_ACCEL,
2603    .class_init = kvm_accel_class_init,
2604    .instance_size = sizeof(KVMState),
2605};
2606
2607static void kvm_type_init(void)
2608{
2609    type_register_static(&kvm_accel_type);
2610}
2611
2612type_init(kvm_type_init);
2613