qemu/accel/kvm/kvm-all.c
   1/*
   2 * QEMU KVM support
   3 *
   4 * Copyright IBM, Corp. 2008
   5 *           Red Hat, Inc. 2008
   6 *
   7 * Authors:
   8 *  Anthony Liguori   <aliguori@us.ibm.com>
   9 *  Glauber Costa     <gcosta@redhat.com>
  10 *
  11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  12 * See the COPYING file in the top-level directory.
  13 *
  14 */
  15
  16#include "qemu/osdep.h"
  17#include <sys/ioctl.h>
  18
  19#include <linux/kvm.h>
  20
  21#include "qemu-common.h"
  22#include "qemu/atomic.h"
  23#include "qemu/option.h"
  24#include "qemu/config-file.h"
  25#include "qemu/error-report.h"
  26#include "qapi/error.h"
  27#include "hw/hw.h"
  28#include "hw/pci/msi.h"
  29#include "hw/pci/msix.h"
  30#include "hw/s390x/adapter.h"
  31#include "exec/gdbstub.h"
  32#include "sysemu/kvm_int.h"
  33#include "sysemu/cpus.h"
  34#include "qemu/bswap.h"
  35#include "exec/memory.h"
  36#include "exec/ram_addr.h"
  37#include "exec/address-spaces.h"
  38#include "qemu/event_notifier.h"
  39#include "trace.h"
  40#include "hw/irq.h"
  41
  42#include "hw/boards.h"
  43
  44/* This check must be after config-host.h is included */
  45#ifdef CONFIG_EVENTFD
  46#include <sys/eventfd.h>
  47#endif
  48
  49/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
  50 * need to use the real host PAGE_SIZE, as that's what KVM will use.
  51 */
  52#define PAGE_SIZE getpagesize()
  53
  54//#define DEBUG_KVM
  55
  56#ifdef DEBUG_KVM
  57#define DPRINTF(fmt, ...) \
  58    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
  59#else
  60#define DPRINTF(fmt, ...) \
  61    do { } while (0)
  62#endif
  63
  64#define KVM_MSI_HASHTAB_SIZE    256
  65
  66struct KVMParkedVcpu {
  67    unsigned long vcpu_id;
  68    int kvm_fd;
  69    QLIST_ENTRY(KVMParkedVcpu) node;
  70};
  71
  72struct KVMState
  73{
  74    AccelState parent_obj;
  75
  76    int nr_slots;
  77    int fd;
  78    int vmfd;
  79    int coalesced_mmio;
  80    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
  81    bool coalesced_flush_in_progress;
  82    int vcpu_events;
  83    int robust_singlestep;
  84    int debugregs;
  85#ifdef KVM_CAP_SET_GUEST_DEBUG
  86    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
  87#endif
  88    int many_ioeventfds;
  89    int intx_set_mask;
  90    bool sync_mmu;
  91    /* The man page (and posix) say ioctl numbers are signed int, but
  92     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
  93     * unsigned, and treating them as signed here can break things */
  94    unsigned irq_set_ioctl;
  95    unsigned int sigmask_len;
  96    GHashTable *gsimap;
  97#ifdef KVM_CAP_IRQ_ROUTING
  98    struct kvm_irq_routing *irq_routes;
  99    int nr_allocated_irq_routes;
 100    unsigned long *used_gsi_bitmap;
 101    unsigned int gsi_count;
 102    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
 103#endif
 104    KVMMemoryListener memory_listener;
 105    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
 106};
 107
 108KVMState *kvm_state;
 109bool kvm_kernel_irqchip;
 110bool kvm_split_irqchip;
 111bool kvm_async_interrupts_allowed;
 112bool kvm_halt_in_kernel_allowed;
 113bool kvm_eventfds_allowed;
 114bool kvm_irqfds_allowed;
 115bool kvm_resamplefds_allowed;
 116bool kvm_msi_via_irqfd_allowed;
 117bool kvm_gsi_routing_allowed;
 118bool kvm_gsi_direct_mapping;
 119bool kvm_allowed;
 120bool kvm_readonly_mem_allowed;
 121bool kvm_vm_attributes_allowed;
 122bool kvm_direct_msi_allowed;
 123bool kvm_ioeventfd_any_length_allowed;
 124bool kvm_msi_use_devid;
 125static bool kvm_immediate_exit;
 126
 127static const KVMCapabilityInfo kvm_required_capabilites[] = {
 128    KVM_CAP_INFO(USER_MEMORY),
 129    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
 130    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
 131    KVM_CAP_LAST_INFO
 132};
 133
 134int kvm_get_max_memslots(void)
 135{
 136    KVMState *s = KVM_STATE(current_machine->accelerator);
 137
 138    return s->nr_slots;
 139}
 140
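     /* Return the first slot in this listener with no memory assigned,
      * or NULL if all of the VM's memory slots are already in use. */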
 141static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 142{
 143    KVMState *s = kvm_state;
 144    int i;
 145
 146    for (i = 0; i < s->nr_slots; i++) {
 147        if (kml->slots[i].memory_size == 0) {
 148            return &kml->slots[i];
 149        }
 150    }
 151
 152    return NULL;
 153}
 154
 155bool kvm_has_free_slot(MachineState *ms)
 156{
 157    KVMState *s = KVM_STATE(ms->accelerator);
 158
 159    return kvm_get_free_slot(&s->memory_listener);
 160}
 161
 162static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
 163{
 164    KVMSlot *slot = kvm_get_free_slot(kml);
 165
 166    if (slot) {
 167        return slot;
 168    }
 169
 170    fprintf(stderr, "%s: no free slot available\n", __func__);
 171    abort();
 172}
 173
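     /* Find the slot whose start address and size exactly match the given
      * range; return NULL if no such slot exists. */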
 174static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
 175                                         hwaddr start_addr,
 176                                         hwaddr size)
 177{
 178    KVMState *s = kvm_state;
 179    int i;
 180
 181    for (i = 0; i < s->nr_slots; i++) {
 182        KVMSlot *mem = &kml->slots[i];
 183
 184        if (start_addr == mem->start_addr && size == mem->memory_size) {
 185            return mem;
 186        }
 187    }
 188
 189    return NULL;
 190}
 191
 192/*
 193 * Calculate and align the start address and the size of the section.
 194 * Return the size. If the size is 0, the aligned section is empty.
 195 */
 196static hwaddr kvm_align_section(MemoryRegionSection *section,
 197                                hwaddr *start)
 198{
 199    hwaddr size = int128_get64(section->size);
 200    hwaddr delta, aligned;
 201
  202    /* KVM works in page-size chunks, but the function may be called
  203       with a sub-page size and an unaligned start address. Round the start
  204       address up and truncate the size down to host page boundaries. */
 205    aligned = ROUND_UP(section->offset_within_address_space,
 206                       qemu_real_host_page_size);
 207    delta = aligned - section->offset_within_address_space;
 208    *start = aligned;
 209    if (delta > size) {
 210        return 0;
 211    }
 212
 213    return (size - delta) & qemu_real_host_page_mask;
 214}
 215
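     /* Translate a host userspace address back to a guest physical address
      * by scanning the memory slots; returns 1 on success, 0 if not found. */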
 216int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
 217                                       hwaddr *phys_addr)
 218{
 219    KVMMemoryListener *kml = &s->memory_listener;
 220    int i;
 221
 222    for (i = 0; i < s->nr_slots; i++) {
 223        KVMSlot *mem = &kml->slots[i];
 224
 225        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
 226            *phys_addr = mem->start_addr + (ram - mem->ram);
 227            return 1;
 228        }
 229    }
 230
 231    return 0;
 232}
 233
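     /* Push one slot's configuration to the kernel with the
      * KVM_SET_USER_MEMORY_REGION ioctl; a non-empty read-only slot is
      * deleted first and then re-registered with the desired size. */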
 234static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot)
 235{
 236    KVMState *s = kvm_state;
 237    struct kvm_userspace_memory_region mem;
 238
 239    mem.slot = slot->slot | (kml->as_id << 16);
 240    mem.guest_phys_addr = slot->start_addr;
 241    mem.userspace_addr = (unsigned long)slot->ram;
 242    mem.flags = slot->flags;
 243
 244    if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
 245        /* Set the slot size to 0 before setting the slot to the desired
 246         * value. This is needed based on KVM commit 75d61fbc. */
 247        mem.memory_size = 0;
 248        kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 249    }
 250    mem.memory_size = slot->memory_size;
 251    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 252}
 253
 254int kvm_destroy_vcpu(CPUState *cpu)
 255{
 256    KVMState *s = kvm_state;
 257    long mmap_size;
 258    struct KVMParkedVcpu *vcpu = NULL;
 259    int ret = 0;
 260
 261    DPRINTF("kvm_destroy_vcpu\n");
 262
 263    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 264    if (mmap_size < 0) {
 265        ret = mmap_size;
 266        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
 267        goto err;
 268    }
 269
 270    ret = munmap(cpu->kvm_run, mmap_size);
 271    if (ret < 0) {
 272        goto err;
 273    }
 274
 275    vcpu = g_malloc0(sizeof(*vcpu));
 276    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
 277    vcpu->kvm_fd = cpu->kvm_fd;
 278    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
 279err:
 280    return ret;
 281}
 282
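     /* Reuse the fd of a previously parked vcpu with this vcpu_id if one
      * exists, otherwise create a new vcpu with KVM_CREATE_VCPU. */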
 283static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
 284{
 285    struct KVMParkedVcpu *cpu;
 286
 287    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
 288        if (cpu->vcpu_id == vcpu_id) {
 289            int kvm_fd;
 290
 291            QLIST_REMOVE(cpu, node);
 292            kvm_fd = cpu->kvm_fd;
 293            g_free(cpu);
 294            return kvm_fd;
 295        }
 296    }
 297
 298    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
 299}
 300
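     /* Obtain a vcpu fd for this CPU, mmap the shared kvm_run area and let
      * the architecture-specific code finish the per-vcpu setup. */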
 301int kvm_init_vcpu(CPUState *cpu)
 302{
 303    KVMState *s = kvm_state;
 304    long mmap_size;
 305    int ret;
 306
 307    DPRINTF("kvm_init_vcpu\n");
 308
 309    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
 310    if (ret < 0) {
 311        DPRINTF("kvm_create_vcpu failed\n");
 312        goto err;
 313    }
 314
 315    cpu->kvm_fd = ret;
 316    cpu->kvm_state = s;
 317    cpu->vcpu_dirty = true;
 318
 319    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 320    if (mmap_size < 0) {
 321        ret = mmap_size;
 322        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
 323        goto err;
 324    }
 325
 326    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 327                        cpu->kvm_fd, 0);
 328    if (cpu->kvm_run == MAP_FAILED) {
 329        ret = -errno;
 330        DPRINTF("mmap'ing vcpu state failed\n");
 331        goto err;
 332    }
 333
 334    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
 335        s->coalesced_mmio_ring =
 336            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
 337    }
 338
 339    ret = kvm_arch_init_vcpu(cpu);
 340err:
 341    return ret;
 342}
 343
 344/*
 345 * dirty pages logging control
 346 */
 347
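     /* Translate a MemoryRegion's properties into the KVM_MEM_* flags
      * (dirty logging, read-only) used for its memory slot. */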
 348static int kvm_mem_flags(MemoryRegion *mr)
 349{
 350    bool readonly = mr->readonly || memory_region_is_romd(mr);
 351    int flags = 0;
 352
 353    if (memory_region_get_dirty_log_mask(mr) != 0) {
 354        flags |= KVM_MEM_LOG_DIRTY_PAGES;
 355    }
 356    if (readonly && kvm_readonly_mem_allowed) {
 357        flags |= KVM_MEM_READONLY;
 358    }
 359    return flags;
 360}
 361
 362static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
 363                                 MemoryRegion *mr)
 364{
 365    int old_flags;
 366
 367    old_flags = mem->flags;
 368    mem->flags = kvm_mem_flags(mr);
 369
 370    /* If nothing changed effectively, no need to issue ioctl */
 371    if (mem->flags == old_flags) {
 372        return 0;
 373    }
 374
 375    return kvm_set_user_memory_region(kml, mem);
 376}
 377
 378static int kvm_section_update_flags(KVMMemoryListener *kml,
 379                                    MemoryRegionSection *section)
 380{
 381    hwaddr start_addr, size;
 382    KVMSlot *mem;
 383
 384    size = kvm_align_section(section, &start_addr);
 385    if (!size) {
 386        return 0;
 387    }
 388
 389    mem = kvm_lookup_matching_slot(kml, start_addr, size);
 390    if (!mem) {
 391        /* We don't have a slot if we want to trap every access. */
 392        return 0;
 393    }
 394
 395    return kvm_slot_update_flags(kml, mem, section->mr);
 396}
 397
 398static void kvm_log_start(MemoryListener *listener,
 399                          MemoryRegionSection *section,
 400                          int old, int new)
 401{
 402    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 403    int r;
 404
 405    if (old != 0) {
 406        return;
 407    }
 408
 409    r = kvm_section_update_flags(kml, section);
 410    if (r < 0) {
 411        abort();
 412    }
 413}
 414
 415static void kvm_log_stop(MemoryListener *listener,
 416                          MemoryRegionSection *section,
 417                          int old, int new)
 418{
 419    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 420    int r;
 421
 422    if (new != 0) {
 423        return;
 424    }
 425
 426    r = kvm_section_update_flags(kml, section);
 427    if (r < 0) {
 428        abort();
 429    }
 430}
 431
 432/* get kvm's dirty pages bitmap and update qemu's */
 433static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
 434                                         unsigned long *bitmap)
 435{
 436    ram_addr_t start = section->offset_within_region +
 437                       memory_region_get_ram_addr(section->mr);
 438    ram_addr_t pages = int128_get64(section->size) / getpagesize();
 439
 440    cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
 441    return 0;
 442}
 443
 444#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
 445
  446/**
  447 * kvm_physical_sync_dirty_bitmap - Grab the dirty bitmap from kernel space
  448 * and update QEMU's dirty bitmap for the pages covered by the section,
  449 * using cpu_physical_memory_set_dirty_lebitmap() on the bitmap returned
  450 * by KVM_GET_DIRTY_LOG.
  451 *
  452 * @kml: the KVM memory listener that owns the slot
  453 * @section: the memory region section whose dirty log is synchronized
  454 */
 455static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
 456                                          MemoryRegionSection *section)
 457{
 458    KVMState *s = kvm_state;
 459    struct kvm_dirty_log d = {};
 460    KVMSlot *mem;
 461    hwaddr start_addr, size;
 462
 463    size = kvm_align_section(section, &start_addr);
 464    if (size) {
 465        mem = kvm_lookup_matching_slot(kml, start_addr, size);
 466        if (!mem) {
 467            /* We don't have a slot if we want to trap every access. */
 468            return 0;
 469        }
 470
  471        /* XXX bad kernel interface alert
  472         * For the dirty bitmap, the kernel allocates an array whose size is
  473         * aligned to bits-per-long.  But when the kernel is 64-bit and
  474         * userspace is 32-bit, userspace cannot align to the same
  475         * bits-per-long, since sizeof(long) differs between kernel and
  476         * user space.  Userspace may therefore provide a buffer that is
  477         * 4 bytes smaller than the one the kernel uses, resulting in
  478         * userspace memory corruption (which is not detectable by valgrind
  479         * either, in most cases).
  480         * So for now, align to 64 instead of HOST_LONG_BITS here, in the
  481         * hope that sizeof(long) won't become >8 any time soon.
  482         */
 483        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
 484                     /*HOST_LONG_BITS*/ 64) / 8;
 485        d.dirty_bitmap = g_malloc0(size);
 486
 487        d.slot = mem->slot | (kml->as_id << 16);
 488        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
 489            DPRINTF("ioctl failed %d\n", errno);
 490            g_free(d.dirty_bitmap);
 491            return -1;
 492        }
 493
 494        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
 495        g_free(d.dirty_bitmap);
 496    }
 497
 498    return 0;
 499}
 500
 501static void kvm_coalesce_mmio_region(MemoryListener *listener,
  502                                     MemoryRegionSection *section,
 503                                     hwaddr start, hwaddr size)
 504{
 505    KVMState *s = kvm_state;
 506
 507    if (s->coalesced_mmio) {
 508        struct kvm_coalesced_mmio_zone zone;
 509
 510        zone.addr = start;
 511        zone.size = size;
 512        zone.pad = 0;
 513
 514        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
 515    }
 516}
 517
 518static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
  519                                       MemoryRegionSection *section,
 520                                       hwaddr start, hwaddr size)
 521{
 522    KVMState *s = kvm_state;
 523
 524    if (s->coalesced_mmio) {
 525        struct kvm_coalesced_mmio_zone zone;
 526
 527        zone.addr = start;
 528        zone.size = size;
 529        zone.pad = 0;
 530
 531        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 532    }
 533}
 534
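     /* Query a capability on the /dev/kvm fd; any error is folded into 0,
      * i.e. "extension not supported". */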
 535int kvm_check_extension(KVMState *s, unsigned int extension)
 536{
 537    int ret;
 538
 539    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
 540    if (ret < 0) {
 541        ret = 0;
 542    }
 543
 544    return ret;
 545}
 546
 547int kvm_vm_check_extension(KVMState *s, unsigned int extension)
 548{
 549    int ret;
 550
 551    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
 552    if (ret < 0) {
 553        /* VM wide version not implemented, use global one instead */
 554        ret = kvm_check_extension(s, extension);
 555    }
 556
 557    return ret;
 558}
 559
 560static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
 561{
 562#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
 563    /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
 564     * endianness, but the memory core hands them in target endianness.
 565     * For example, PPC is always treated as big-endian even if running
 566     * on KVM and on PPC64LE.  Correct here.
 567     */
 568    switch (size) {
 569    case 2:
 570        val = bswap16(val);
 571        break;
 572    case 4:
 573        val = bswap32(val);
 574        break;
 575    }
 576#endif
 577    return val;
 578}
 579
 580static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
 581                                  bool assign, uint32_t size, bool datamatch)
 582{
 583    int ret;
 584    struct kvm_ioeventfd iofd = {
 585        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
 586        .addr = addr,
 587        .len = size,
 588        .flags = 0,
 589        .fd = fd,
 590    };
 591
 592    if (!kvm_enabled()) {
 593        return -ENOSYS;
 594    }
 595
 596    if (datamatch) {
 597        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
 598    }
 599    if (!assign) {
 600        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
 601    }
 602
 603    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
 604
 605    if (ret < 0) {
 606        return -errno;
 607    }
 608
 609    return 0;
 610}
 611
 612static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
 613                                 bool assign, uint32_t size, bool datamatch)
 614{
 615    struct kvm_ioeventfd kick = {
 616        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
 617        .addr = addr,
 618        .flags = KVM_IOEVENTFD_FLAG_PIO,
 619        .len = size,
 620        .fd = fd,
 621    };
 622    int r;
 623    if (!kvm_enabled()) {
 624        return -ENOSYS;
 625    }
 626    if (datamatch) {
 627        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
 628    }
 629    if (!assign) {
 630        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
 631    }
 632    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
 633    if (r < 0) {
 634        return r;
 635    }
 636    return 0;
 637}
 638
 639
 640static int kvm_check_many_ioeventfds(void)
 641{
 642    /* Userspace can use ioeventfd for io notification.  This requires a host
 643     * that supports eventfd(2) and an I/O thread; since eventfd does not
 644     * support SIGIO it cannot interrupt the vcpu.
 645     *
 646     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
 647     * can avoid creating too many ioeventfds.
 648     */
 649#if defined(CONFIG_EVENTFD)
 650    int ioeventfds[7];
 651    int i, ret = 0;
 652    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
 653        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
 654        if (ioeventfds[i] < 0) {
 655            break;
 656        }
 657        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
 658        if (ret < 0) {
 659            close(ioeventfds[i]);
 660            break;
 661        }
 662    }
 663
 664    /* Decide whether many devices are supported or not */
 665    ret = i == ARRAY_SIZE(ioeventfds);
 666
 667    while (i-- > 0) {
 668        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
 669        close(ioeventfds[i]);
 670    }
 671    return ret;
 672#else
 673    return 0;
 674#endif
 675}
 676
 677static const KVMCapabilityInfo *
 678kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
 679{
 680    while (list->name) {
 681        if (!kvm_check_extension(s, list->value)) {
 682            return list;
 683        }
 684        list++;
 685    }
 686    return NULL;
 687}
 688
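     /* Register (add) or unregister (!add) the KVM memory slot backing a
      * memory region section; when removing a slot with dirty logging
      * enabled, its dirty bitmap is synced first. */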
 689static void kvm_set_phys_mem(KVMMemoryListener *kml,
 690                             MemoryRegionSection *section, bool add)
 691{
 692    KVMSlot *mem;
 693    int err;
 694    MemoryRegion *mr = section->mr;
 695    bool writeable = !mr->readonly && !mr->rom_device;
 696    hwaddr start_addr, size;
 697    void *ram;
 698
 699    if (!memory_region_is_ram(mr)) {
 700        if (writeable || !kvm_readonly_mem_allowed) {
 701            return;
 702        } else if (!mr->romd_mode) {
 703            /* If the memory device is not in romd_mode, then we actually want
 704             * to remove the kvm memory slot so all accesses will trap. */
 705            add = false;
 706        }
 707    }
 708
 709    size = kvm_align_section(section, &start_addr);
 710    if (!size) {
 711        return;
 712    }
 713
 714    /* use aligned delta to align the ram address */
 715    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
 716          (start_addr - section->offset_within_address_space);
 717
 718    if (!add) {
 719        mem = kvm_lookup_matching_slot(kml, start_addr, size);
 720        if (!mem) {
 721            return;
 722        }
 723        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
 724            kvm_physical_sync_dirty_bitmap(kml, section);
 725        }
 726
 727        /* unregister the slot */
 728        mem->memory_size = 0;
 729        err = kvm_set_user_memory_region(kml, mem);
 730        if (err) {
 731            fprintf(stderr, "%s: error unregistering slot: %s\n",
 732                    __func__, strerror(-err));
 733            abort();
 734        }
 735        return;
 736    }
 737
 738    /* register the new slot */
 739    mem = kvm_alloc_slot(kml);
 740    mem->memory_size = size;
 741    mem->start_addr = start_addr;
 742    mem->ram = ram;
 743    mem->flags = kvm_mem_flags(mr);
 744
 745    err = kvm_set_user_memory_region(kml, mem);
 746    if (err) {
 747        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
 748                strerror(-err));
 749        abort();
 750    }
 751}
 752
 753static void kvm_region_add(MemoryListener *listener,
 754                           MemoryRegionSection *section)
 755{
 756    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 757
 758    memory_region_ref(section->mr);
 759    kvm_set_phys_mem(kml, section, true);
 760}
 761
 762static void kvm_region_del(MemoryListener *listener,
 763                           MemoryRegionSection *section)
 764{
 765    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 766
 767    kvm_set_phys_mem(kml, section, false);
 768    memory_region_unref(section->mr);
 769}
 770
 771static void kvm_log_sync(MemoryListener *listener,
 772                         MemoryRegionSection *section)
 773{
 774    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 775    int r;
 776
 777    r = kvm_physical_sync_dirty_bitmap(kml, section);
 778    if (r < 0) {
 779        abort();
 780    }
 781}
 782
 783static void kvm_mem_ioeventfd_add(MemoryListener *listener,
 784                                  MemoryRegionSection *section,
 785                                  bool match_data, uint64_t data,
 786                                  EventNotifier *e)
 787{
 788    int fd = event_notifier_get_fd(e);
 789    int r;
 790
 791    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
 792                               data, true, int128_get64(section->size),
 793                               match_data);
 794    if (r < 0) {
 795        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
 796                __func__, strerror(-r));
 797        abort();
 798    }
 799}
 800
 801static void kvm_mem_ioeventfd_del(MemoryListener *listener,
 802                                  MemoryRegionSection *section,
 803                                  bool match_data, uint64_t data,
 804                                  EventNotifier *e)
 805{
 806    int fd = event_notifier_get_fd(e);
 807    int r;
 808
 809    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
 810                               data, false, int128_get64(section->size),
 811                               match_data);
 812    if (r < 0) {
 813        abort();
 814    }
 815}
 816
 817static void kvm_io_ioeventfd_add(MemoryListener *listener,
 818                                 MemoryRegionSection *section,
 819                                 bool match_data, uint64_t data,
 820                                 EventNotifier *e)
 821{
 822    int fd = event_notifier_get_fd(e);
 823    int r;
 824
 825    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
 826                              data, true, int128_get64(section->size),
 827                              match_data);
 828    if (r < 0) {
 829        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
 830                __func__, strerror(-r));
 831        abort();
 832    }
 833}
 834
 835static void kvm_io_ioeventfd_del(MemoryListener *listener,
 836                                 MemoryRegionSection *section,
 837                                 bool match_data, uint64_t data,
 838                                 EventNotifier *e)
 839
 840{
 841    int fd = event_notifier_get_fd(e);
 842    int r;
 843
 844    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
 845                              data, false, int128_get64(section->size),
 846                              match_data);
 847    if (r < 0) {
 848        abort();
 849    }
 850}
 851
 852void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
 853                                  AddressSpace *as, int as_id)
 854{
 855    int i;
 856
 857    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
 858    kml->as_id = as_id;
 859
 860    for (i = 0; i < s->nr_slots; i++) {
 861        kml->slots[i].slot = i;
 862    }
 863
 864    kml->listener.region_add = kvm_region_add;
 865    kml->listener.region_del = kvm_region_del;
 866    kml->listener.log_start = kvm_log_start;
 867    kml->listener.log_stop = kvm_log_stop;
 868    kml->listener.log_sync = kvm_log_sync;
 869    kml->listener.priority = 10;
 870
 871    memory_listener_register(&kml->listener, as);
 872}
 873
 874static MemoryListener kvm_io_listener = {
 875    .eventfd_add = kvm_io_ioeventfd_add,
 876    .eventfd_del = kvm_io_ioeventfd_del,
 877    .priority = 10,
 878};
 879
 880int kvm_set_irq(KVMState *s, int irq, int level)
 881{
 882    struct kvm_irq_level event;
 883    int ret;
 884
 885    assert(kvm_async_interrupts_enabled());
 886
 887    event.level = level;
 888    event.irq = irq;
 889    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
 890    if (ret < 0) {
 891        perror("kvm_set_irq");
 892        abort();
 893    }
 894
 895    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
 896}
 897
 898#ifdef KVM_CAP_IRQ_ROUTING
 899typedef struct KVMMSIRoute {
 900    struct kvm_irq_routing_entry kroute;
 901    QTAILQ_ENTRY(KVMMSIRoute) entry;
 902} KVMMSIRoute;
 903
 904static void set_gsi(KVMState *s, unsigned int gsi)
 905{
 906    set_bit(gsi, s->used_gsi_bitmap);
 907}
 908
 909static void clear_gsi(KVMState *s, unsigned int gsi)
 910{
 911    clear_bit(gsi, s->used_gsi_bitmap);
 912}
 913
 914void kvm_init_irq_routing(KVMState *s)
 915{
 916    int gsi_count, i;
 917
 918    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
 919    if (gsi_count > 0) {
 920        /* Round up so we can search ints using ffs */
 921        s->used_gsi_bitmap = bitmap_new(gsi_count);
 922        s->gsi_count = gsi_count;
 923    }
 924
 925    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
 926    s->nr_allocated_irq_routes = 0;
 927
 928    if (!kvm_direct_msi_allowed) {
 929        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
 930            QTAILQ_INIT(&s->msi_hashtab[i]);
 931        }
 932    }
 933
 934    kvm_arch_init_irq_routing(s);
 935}
 936
 937void kvm_irqchip_commit_routes(KVMState *s)
 938{
 939    int ret;
 940
 941    if (kvm_gsi_direct_mapping()) {
 942        return;
 943    }
 944
 945    if (!kvm_gsi_routing_enabled()) {
 946        return;
 947    }
 948
 949    s->irq_routes->flags = 0;
 950    trace_kvm_irqchip_commit_routes();
 951    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
 952    assert(ret == 0);
 953}
 954
 955static void kvm_add_routing_entry(KVMState *s,
 956                                  struct kvm_irq_routing_entry *entry)
 957{
 958    struct kvm_irq_routing_entry *new;
 959    int n, size;
 960
 961    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
 962        n = s->nr_allocated_irq_routes * 2;
 963        if (n < 64) {
 964            n = 64;
 965        }
 966        size = sizeof(struct kvm_irq_routing);
 967        size += n * sizeof(*new);
 968        s->irq_routes = g_realloc(s->irq_routes, size);
 969        s->nr_allocated_irq_routes = n;
 970    }
 971    n = s->irq_routes->nr++;
 972    new = &s->irq_routes->entries[n];
 973
 974    *new = *entry;
 975
 976    set_gsi(s, entry->gsi);
 977}
 978
 979static int kvm_update_routing_entry(KVMState *s,
 980                                    struct kvm_irq_routing_entry *new_entry)
 981{
 982    struct kvm_irq_routing_entry *entry;
 983    int n;
 984
 985    for (n = 0; n < s->irq_routes->nr; n++) {
 986        entry = &s->irq_routes->entries[n];
 987        if (entry->gsi != new_entry->gsi) {
 988            continue;
 989        }
 990
  991        if (!memcmp(entry, new_entry, sizeof *entry)) {
 992            return 0;
 993        }
 994
 995        *entry = *new_entry;
 996
 997        return 0;
 998    }
 999
1000    return -ESRCH;
1001}
1002
1003void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1004{
1005    struct kvm_irq_routing_entry e = {};
1006
1007    assert(pin < s->gsi_count);
1008
1009    e.gsi = irq;
1010    e.type = KVM_IRQ_ROUTING_IRQCHIP;
1011    e.flags = 0;
1012    e.u.irqchip.irqchip = irqchip;
1013    e.u.irqchip.pin = pin;
1014    kvm_add_routing_entry(s, &e);
1015}
1016
1017void kvm_irqchip_release_virq(KVMState *s, int virq)
1018{
1019    struct kvm_irq_routing_entry *e;
1020    int i;
1021
1022    if (kvm_gsi_direct_mapping()) {
1023        return;
1024    }
1025
1026    for (i = 0; i < s->irq_routes->nr; i++) {
1027        e = &s->irq_routes->entries[i];
1028        if (e->gsi == virq) {
1029            s->irq_routes->nr--;
1030            *e = s->irq_routes->entries[s->irq_routes->nr];
1031        }
1032    }
1033    clear_gsi(s, virq);
1034    kvm_arch_release_virq_post(virq);
1035    trace_kvm_irqchip_release_virq(virq);
1036}
1037
1038static unsigned int kvm_hash_msi(uint32_t data)
1039{
1040    /* This is optimized for IA32 MSI layout. However, no other arch shall
1041     * repeat the mistake of not providing a direct MSI injection API. */
1042    return data & 0xff;
1043}
1044
1045static void kvm_flush_dynamic_msi_routes(KVMState *s)
1046{
1047    KVMMSIRoute *route, *next;
1048    unsigned int hash;
1049
1050    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1051        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1052            kvm_irqchip_release_virq(s, route->kroute.gsi);
1053            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1054            g_free(route);
1055        }
1056    }
1057}
1058
1059static int kvm_irqchip_get_virq(KVMState *s)
1060{
1061    int next_virq;
1062
 1063    /*
 1064     * The PIC and the IOAPIC share the first 16 GSI numbers, so there are
 1065     * more available GSI numbers than IRQ route entries. Allocating a GSI
 1066     * can therefore succeed even though a new route entry cannot be added.
 1067     * When this happens, flush dynamic MSI entries to free IRQ route entries.
 1068     */
1069    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
1070        kvm_flush_dynamic_msi_routes(s);
1071    }
1072
1073    /* Return the lowest unused GSI in the bitmap */
1074    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
1075    if (next_virq >= s->gsi_count) {
1076        return -ENOSPC;
1077    } else {
1078        return next_virq;
1079    }
1080}
1081
1082static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1083{
1084    unsigned int hash = kvm_hash_msi(msg.data);
1085    KVMMSIRoute *route;
1086
1087    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1088        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1089            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1090            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1091            return route;
1092        }
1093    }
1094    return NULL;
1095}
1096
1097int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1098{
1099    struct kvm_msi msi;
1100    KVMMSIRoute *route;
1101
1102    if (kvm_direct_msi_allowed) {
1103        msi.address_lo = (uint32_t)msg.address;
1104        msi.address_hi = msg.address >> 32;
1105        msi.data = le32_to_cpu(msg.data);
1106        msi.flags = 0;
1107        memset(msi.pad, 0, sizeof(msi.pad));
1108
1109        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
1110    }
1111
1112    route = kvm_lookup_msi_route(s, msg);
1113    if (!route) {
1114        int virq;
1115
1116        virq = kvm_irqchip_get_virq(s);
1117        if (virq < 0) {
1118            return virq;
1119        }
1120
1121        route = g_malloc0(sizeof(KVMMSIRoute));
1122        route->kroute.gsi = virq;
1123        route->kroute.type = KVM_IRQ_ROUTING_MSI;
1124        route->kroute.flags = 0;
1125        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1126        route->kroute.u.msi.address_hi = msg.address >> 32;
1127        route->kroute.u.msi.data = le32_to_cpu(msg.data);
1128
1129        kvm_add_routing_entry(s, &route->kroute);
1130        kvm_irqchip_commit_routes(s);
1131
1132        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1133                           entry);
1134    }
1135
1136    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1137
1138    return kvm_set_irq(s, route->kroute.gsi, 1);
1139}
1140
1141int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1142{
1143    struct kvm_irq_routing_entry kroute = {};
1144    int virq;
1145    MSIMessage msg = {0, 0};
1146
1147    if (pci_available && dev) {
1148        msg = pci_get_msi_message(dev, vector);
1149    }
1150
1151    if (kvm_gsi_direct_mapping()) {
1152        return kvm_arch_msi_data_to_gsi(msg.data);
1153    }
1154
1155    if (!kvm_gsi_routing_enabled()) {
1156        return -ENOSYS;
1157    }
1158
1159    virq = kvm_irqchip_get_virq(s);
1160    if (virq < 0) {
1161        return virq;
1162    }
1163
1164    kroute.gsi = virq;
1165    kroute.type = KVM_IRQ_ROUTING_MSI;
1166    kroute.flags = 0;
1167    kroute.u.msi.address_lo = (uint32_t)msg.address;
1168    kroute.u.msi.address_hi = msg.address >> 32;
1169    kroute.u.msi.data = le32_to_cpu(msg.data);
1170    if (pci_available && kvm_msi_devid_required()) {
1171        kroute.flags = KVM_MSI_VALID_DEVID;
1172        kroute.u.msi.devid = pci_requester_id(dev);
1173    }
1174    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1175        kvm_irqchip_release_virq(s, virq);
1176        return -EINVAL;
1177    }
1178
1179    trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
1180                                    vector, virq);
1181
1182    kvm_add_routing_entry(s, &kroute);
1183    kvm_arch_add_msi_route_post(&kroute, vector, dev);
1184    kvm_irqchip_commit_routes(s);
1185
1186    return virq;
1187}
1188
1189int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
1190                                 PCIDevice *dev)
1191{
1192    struct kvm_irq_routing_entry kroute = {};
1193
1194    if (kvm_gsi_direct_mapping()) {
1195        return 0;
1196    }
1197
1198    if (!kvm_irqchip_in_kernel()) {
1199        return -ENOSYS;
1200    }
1201
1202    kroute.gsi = virq;
1203    kroute.type = KVM_IRQ_ROUTING_MSI;
1204    kroute.flags = 0;
1205    kroute.u.msi.address_lo = (uint32_t)msg.address;
1206    kroute.u.msi.address_hi = msg.address >> 32;
1207    kroute.u.msi.data = le32_to_cpu(msg.data);
1208    if (pci_available && kvm_msi_devid_required()) {
1209        kroute.flags = KVM_MSI_VALID_DEVID;
1210        kroute.u.msi.devid = pci_requester_id(dev);
1211    }
1212    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1213        return -EINVAL;
1214    }
1215
1216    trace_kvm_irqchip_update_msi_route(virq);
1217
1218    return kvm_update_routing_entry(s, &kroute);
1219}
1220
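     /* Attach or detach an eventfd to/from a GSI with the KVM_IRQFD ioctl;
      * rfd, unless -1, is installed as the resample eventfd. */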
1221static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
1222                                    bool assign)
1223{
1224    struct kvm_irqfd irqfd = {
1225        .fd = fd,
1226        .gsi = virq,
1227        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
1228    };
1229
1230    if (rfd != -1) {
1231        irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
1232        irqfd.resamplefd = rfd;
1233    }
1234
1235    if (!kvm_irqfds_enabled()) {
1236        return -ENOSYS;
1237    }
1238
1239    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
1240}
1241
1242int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1243{
1244    struct kvm_irq_routing_entry kroute = {};
1245    int virq;
1246
1247    if (!kvm_gsi_routing_enabled()) {
1248        return -ENOSYS;
1249    }
1250
1251    virq = kvm_irqchip_get_virq(s);
1252    if (virq < 0) {
1253        return virq;
1254    }
1255
1256    kroute.gsi = virq;
1257    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
1258    kroute.flags = 0;
1259    kroute.u.adapter.summary_addr = adapter->summary_addr;
1260    kroute.u.adapter.ind_addr = adapter->ind_addr;
1261    kroute.u.adapter.summary_offset = adapter->summary_offset;
1262    kroute.u.adapter.ind_offset = adapter->ind_offset;
1263    kroute.u.adapter.adapter_id = adapter->adapter_id;
1264
1265    kvm_add_routing_entry(s, &kroute);
1266
1267    return virq;
1268}
1269
1270int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1271{
1272    struct kvm_irq_routing_entry kroute = {};
1273    int virq;
1274
1275    if (!kvm_gsi_routing_enabled()) {
1276        return -ENOSYS;
1277    }
1278    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
1279        return -ENOSYS;
1280    }
1281    virq = kvm_irqchip_get_virq(s);
1282    if (virq < 0) {
1283        return virq;
1284    }
1285
1286    kroute.gsi = virq;
1287    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
1288    kroute.flags = 0;
1289    kroute.u.hv_sint.vcpu = vcpu;
1290    kroute.u.hv_sint.sint = sint;
1291
1292    kvm_add_routing_entry(s, &kroute);
1293    kvm_irqchip_commit_routes(s);
1294
1295    return virq;
1296}
1297
1298#else /* !KVM_CAP_IRQ_ROUTING */
1299
1300void kvm_init_irq_routing(KVMState *s)
1301{
1302}
1303
1304void kvm_irqchip_release_virq(KVMState *s, int virq)
1305{
1306}
1307
1308int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1309{
1310    abort();
1311}
1312
1313int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1314{
1315    return -ENOSYS;
1316}
1317
1318int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1319{
1320    return -ENOSYS;
1321}
1322
1323int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1324{
1325    return -ENOSYS;
1326}
1327
 1328static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq, bool assign)
1329{
1330    abort();
1331}
1332
 1333int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, PCIDevice *dev)
1334{
1335    return -ENOSYS;
1336}
1337#endif /* !KVM_CAP_IRQ_ROUTING */
1338
1339int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1340                                       EventNotifier *rn, int virq)
1341{
1342    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
1343           rn ? event_notifier_get_fd(rn) : -1, virq, true);
1344}
1345
1346int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1347                                          int virq)
1348{
1349    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
1350           false);
1351}
1352
1353int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
1354                                   EventNotifier *rn, qemu_irq irq)
1355{
1356    gpointer key, gsi;
1357    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1358
1359    if (!found) {
1360        return -ENXIO;
1361    }
1362    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
1363}
1364
1365int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
1366                                      qemu_irq irq)
1367{
1368    gpointer key, gsi;
1369    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1370
1371    if (!found) {
1372        return -ENXIO;
1373    }
1374    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
1375}
1376
1377void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
1378{
1379    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
1380}
1381
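     /* Create the in-kernel irqchip (via an arch-specific hook or
      * KVM_CREATE_IRQCHIP), then set up GSI routing and the
      * qemu_irq-to-GSI hash table. */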
1382static void kvm_irqchip_create(MachineState *machine, KVMState *s)
1383{
1384    int ret;
1385
1386    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
1387        ;
1388    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
1389        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
1390        if (ret < 0) {
1391            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
1392            exit(1);
1393        }
1394    } else {
1395        return;
1396    }
1397
 1398    /* First probe and see if there's an arch-specific hook to create the
1399     * in-kernel irqchip for us */
1400    ret = kvm_arch_irqchip_create(machine, s);
1401    if (ret == 0) {
1402        if (machine_kernel_irqchip_split(machine)) {
1403            perror("Split IRQ chip mode not supported.");
1404            exit(1);
1405        } else {
1406            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
1407        }
1408    }
1409    if (ret < 0) {
1410        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
1411        exit(1);
1412    }
1413
1414    kvm_kernel_irqchip = true;
1415    /* If we have an in-kernel IRQ chip then we must have asynchronous
1416     * interrupt delivery (though the reverse is not necessarily true)
1417     */
1418    kvm_async_interrupts_allowed = true;
1419    kvm_halt_in_kernel_allowed = true;
1420
1421    kvm_init_irq_routing(s);
1422
1423    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
1424}
1425
1426/* Find number of supported CPUs using the recommended
1427 * procedure from the kernel API documentation to cope with
1428 * older kernels that may be missing capabilities.
1429 */
1430static int kvm_recommended_vcpus(KVMState *s)
1431{
1432    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
1433    return (ret) ? ret : 4;
1434}
1435
1436static int kvm_max_vcpus(KVMState *s)
1437{
1438    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
1439    return (ret) ? ret : kvm_recommended_vcpus(s);
1440}
1441
1442static int kvm_max_vcpu_id(KVMState *s)
1443{
1444    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
1445    return (ret) ? ret : kvm_max_vcpus(s);
1446}
1447
1448bool kvm_vcpu_id_is_valid(int vcpu_id)
1449{
1450    KVMState *s = KVM_STATE(current_machine->accelerator);
1451    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
1452}
1453
1454static int kvm_init(MachineState *ms)
1455{
1456    MachineClass *mc = MACHINE_GET_CLASS(ms);
1457    static const char upgrade_note[] =
1458        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
1459        "(see http://sourceforge.net/projects/kvm).\n";
1460    struct {
1461        const char *name;
1462        int num;
1463    } num_cpus[] = {
1464        { "SMP",          smp_cpus },
1465        { "hotpluggable", max_cpus },
1466        { NULL, }
1467    }, *nc = num_cpus;
1468    int soft_vcpus_limit, hard_vcpus_limit;
1469    KVMState *s;
1470    const KVMCapabilityInfo *missing_cap;
1471    int ret;
1472    int type = 0;
1473    const char *kvm_type;
1474
1475    s = KVM_STATE(ms->accelerator);
1476
1477    /*
1478     * On systems where the kernel can support different base page
1479     * sizes, host page size may be different from TARGET_PAGE_SIZE,
1480     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
1481     * page size for the system though.
1482     */
1483    assert(TARGET_PAGE_SIZE <= getpagesize());
1484
1485    s->sigmask_len = 8;
1486
1487#ifdef KVM_CAP_SET_GUEST_DEBUG
1488    QTAILQ_INIT(&s->kvm_sw_breakpoints);
1489#endif
1490    QLIST_INIT(&s->kvm_parked_vcpus);
1491    s->vmfd = -1;
1492    s->fd = qemu_open("/dev/kvm", O_RDWR);
1493    if (s->fd == -1) {
1494        fprintf(stderr, "Could not access KVM kernel module: %m\n");
1495        ret = -errno;
1496        goto err;
1497    }
1498
1499    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
1500    if (ret < KVM_API_VERSION) {
1501        if (ret >= 0) {
1502            ret = -EINVAL;
1503        }
1504        fprintf(stderr, "kvm version too old\n");
1505        goto err;
1506    }
1507
1508    if (ret > KVM_API_VERSION) {
1509        ret = -EINVAL;
1510        fprintf(stderr, "kvm version not supported\n");
1511        goto err;
1512    }
1513
1514    kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
1515    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
1516
1517    /* If unspecified, use the default value */
1518    if (!s->nr_slots) {
1519        s->nr_slots = 32;
1520    }
1521
1522    kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
1523    if (mc->kvm_type) {
1524        type = mc->kvm_type(kvm_type);
1525    } else if (kvm_type) {
1526        ret = -EINVAL;
1527        fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
1528        goto err;
1529    }
1530
1531    do {
1532        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
1533    } while (ret == -EINTR);
1534
1535    if (ret < 0) {
1536        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
1537                strerror(-ret));
1538
1539#ifdef TARGET_S390X
1540        if (ret == -EINVAL) {
1541            fprintf(stderr,
1542                    "Host kernel setup problem detected. Please verify:\n");
1543            fprintf(stderr, "- for kernels supporting the switch_amode or"
1544                    " user_mode parameters, whether\n");
1545            fprintf(stderr,
1546                    "  user space is running in primary address space\n");
1547            fprintf(stderr,
1548                    "- for kernels supporting the vm.allocate_pgste sysctl, "
1549                    "whether it is enabled\n");
1550        }
1551#endif
1552        goto err;
1553    }
1554
1555    s->vmfd = ret;
1556
1557    /* check the vcpu limits */
1558    soft_vcpus_limit = kvm_recommended_vcpus(s);
1559    hard_vcpus_limit = kvm_max_vcpus(s);
1560
1561    while (nc->name) {
1562        if (nc->num > soft_vcpus_limit) {
1563            warn_report("Number of %s cpus requested (%d) exceeds "
1564                        "the recommended cpus supported by KVM (%d)",
1565                        nc->name, nc->num, soft_vcpus_limit);
1566
1567            if (nc->num > hard_vcpus_limit) {
1568                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
1569                        "the maximum cpus supported by KVM (%d)\n",
1570                        nc->name, nc->num, hard_vcpus_limit);
1571                exit(1);
1572            }
1573        }
1574        nc++;
1575    }
1576
1577    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
1578    if (!missing_cap) {
1579        missing_cap =
1580            kvm_check_extension_list(s, kvm_arch_required_capabilities);
1581    }
1582    if (missing_cap) {
1583        ret = -EINVAL;
1584        fprintf(stderr, "kvm does not support %s\n%s",
1585                missing_cap->name, upgrade_note);
1586        goto err;
1587    }
1588
1589    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
1590
1591#ifdef KVM_CAP_VCPU_EVENTS
1592    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
1593#endif
1594
1595    s->robust_singlestep =
1596        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
1597
1598#ifdef KVM_CAP_DEBUGREGS
1599    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
1600#endif
1601
1602#ifdef KVM_CAP_IRQ_ROUTING
1603    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
1604#endif
1605
1606    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
1607
1608    s->irq_set_ioctl = KVM_IRQ_LINE;
1609    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
1610        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
1611    }
1612
1613#ifdef KVM_CAP_READONLY_MEM
1614    kvm_readonly_mem_allowed =
1615        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
1616#endif
1617
1618    kvm_eventfds_allowed =
1619        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
1620
1621    kvm_irqfds_allowed =
1622        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
1623
1624    kvm_resamplefds_allowed =
1625        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
1626
1627    kvm_vm_attributes_allowed =
1628        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
1629
1630    kvm_ioeventfd_any_length_allowed =
1631        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
1632
1633    kvm_state = s;
1634
1635    ret = kvm_arch_init(ms, s);
1636    if (ret < 0) {
1637        goto err;
1638    }
1639
1640    if (machine_kernel_irqchip_allowed(ms)) {
1641        kvm_irqchip_create(ms, s);
1642    }
1643
1644    if (kvm_eventfds_allowed) {
1645        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
1646        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
1647    }
1648    s->memory_listener.listener.coalesced_mmio_add = kvm_coalesce_mmio_region;
1649    s->memory_listener.listener.coalesced_mmio_del = kvm_uncoalesce_mmio_region;
1650
1651    kvm_memory_listener_register(s, &s->memory_listener,
1652                                 &address_space_memory, 0);
1653    memory_listener_register(&kvm_io_listener,
1654                             &address_space_io);
1655
1656    s->many_ioeventfds = kvm_check_many_ioeventfds();
1657
1658    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
1659
1660    return 0;
1661
1662err:
1663    assert(ret < 0);
1664    if (s->vmfd >= 0) {
1665        close(s->vmfd);
1666    }
1667    if (s->fd != -1) {
1668        close(s->fd);
1669    }
1670    g_free(s->memory_listener.slots);
1671
1672    return ret;
1673}
1674
1675void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
1676{
1677    s->sigmask_len = sigmask_len;
1678}
1679
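     /* Replay the programmed I/O accesses described by a KVM_EXIT_IO exit,
      * one element at a time, through the I/O address space. */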
1680static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
1681                          int size, uint32_t count)
1682{
1683    int i;
1684    uint8_t *ptr = data;
1685
1686    for (i = 0; i < count; i++) {
1687        address_space_rw(&address_space_io, port, attrs,
1688                         ptr, size,
1689                         direction == KVM_EXIT_IO_OUT);
1690        ptr += size;
1691    }
1692}
1693
1694static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
1695{
1696    fprintf(stderr, "KVM internal error. Suberror: %d\n",
1697            run->internal.suberror);
1698
1699    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
1700        int i;
1701
1702        for (i = 0; i < run->internal.ndata; ++i) {
1703            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
1704                    i, (uint64_t)run->internal.data[i]);
1705        }
1706    }
1707    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
1708        fprintf(stderr, "emulation failure\n");
1709        if (!kvm_arch_stop_on_emulation_error(cpu)) {
1710            cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
1711            return EXCP_INTERRUPT;
1712        }
1713    }
1714    /* FIXME: Should trigger a qmp message to let management know
1715     * something went wrong.
1716     */
1717    return -1;
1718}
1719
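     /* Drain the coalesced-MMIO ring shared with the kernel, replaying each
      * buffered write with cpu_physical_memory_write(). */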
1720void kvm_flush_coalesced_mmio_buffer(void)
1721{
1722    KVMState *s = kvm_state;
1723
1724    if (s->coalesced_flush_in_progress) {
1725        return;
1726    }
1727
1728    s->coalesced_flush_in_progress = true;
1729
1730    if (s->coalesced_mmio_ring) {
1731        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
1732        while (ring->first != ring->last) {
1733            struct kvm_coalesced_mmio *ent;
1734
1735            ent = &ring->coalesced_mmio[ring->first];
1736
1737            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
1738            smp_wmb();
1739            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
1740        }
1741    }
1742
1743    s->coalesced_flush_in_progress = false;
1744}
1745
1746static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
1747{
1748    if (!cpu->vcpu_dirty) {
1749        kvm_arch_get_registers(cpu);
1750        cpu->vcpu_dirty = true;
1751    }
1752}
1753
1754void kvm_cpu_synchronize_state(CPUState *cpu)
1755{
1756    if (!cpu->vcpu_dirty) {
1757        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
1758    }
1759}
1760
1761static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
1762{
1763    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
1764    cpu->vcpu_dirty = false;
1765}
1766
1767void kvm_cpu_synchronize_post_reset(CPUState *cpu)
1768{
1769    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
1770}
1771
1772static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
1773{
1774    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
1775    cpu->vcpu_dirty = false;
1776}
1777
1778void kvm_cpu_synchronize_post_init(CPUState *cpu)
1779{
1780    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
1781}
1782
1783static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
1784{
1785    cpu->vcpu_dirty = true;
1786}
1787
1788void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
1789{
1790    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
1791}
1792
1793#ifdef KVM_HAVE_MCE_INJECTION
1794static __thread void *pending_sigbus_addr;
1795static __thread int pending_sigbus_code;
1796static __thread bool have_sigbus_pending;
1797#endif
1798
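     /* kvm_cpu_kick() requests an immediate exit from KVM_RUN through
      * kvm_run->immediate_exit; kvm_cpu_kick_self() falls back to a SIG_IPI
      * self-signal on kernels without that capability. */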
1799static void kvm_cpu_kick(CPUState *cpu)
1800{
1801    atomic_set(&cpu->kvm_run->immediate_exit, 1);
1802}
1803
1804static void kvm_cpu_kick_self(void)
1805{
1806    if (kvm_immediate_exit) {
1807        kvm_cpu_kick(current_cpu);
1808    } else {
1809        qemu_cpu_kick_self();
1810    }
1811}
1812
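     /* Consume any pending SIG_IPI so the next KVM_RUN is not interrupted
      * again; with immediate_exit support, only clear the flag. */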
1813static void kvm_eat_signals(CPUState *cpu)
1814{
1815    struct timespec ts = { 0, 0 };
1816    siginfo_t siginfo;
1817    sigset_t waitset;
1818    sigset_t chkset;
1819    int r;
1820
1821    if (kvm_immediate_exit) {
1822        atomic_set(&cpu->kvm_run->immediate_exit, 0);
1823        /* Write kvm_run->immediate_exit before the cpu->exit_request
1824         * write in kvm_cpu_exec.
1825         */
1826        smp_wmb();
1827        return;
1828    }
1829
1830    sigemptyset(&waitset);
1831    sigaddset(&waitset, SIG_IPI);
1832
1833    do {
1834        r = sigtimedwait(&waitset, &siginfo, &ts);
1835        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
1836            perror("sigtimedwait");
1837            exit(1);
1838        }
1839
1840        r = sigpending(&chkset);
1841        if (r == -1) {
1842            perror("sigpending");
1843            exit(1);
1844        }
1845    } while (sigismember(&chkset, SIG_IPI));
1846}
1847
1848int kvm_cpu_exec(CPUState *cpu)
1849{
1850    struct kvm_run *run = cpu->kvm_run;
1851    int ret, run_ret;
1852
1853    DPRINTF("kvm_cpu_exec()\n");
1854
1855    if (kvm_arch_process_async_events(cpu)) {
1856        atomic_set(&cpu->exit_request, 0);
1857        return EXCP_HLT;
1858    }
1859
1860    qemu_mutex_unlock_iothread();
1861    cpu_exec_start(cpu);
1862
1863    do {
1864        MemTxAttrs attrs;
1865
1866        if (cpu->vcpu_dirty) {
1867            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
1868            cpu->vcpu_dirty = false;
1869        }
1870
1871        kvm_arch_pre_run(cpu, run);
1872        if (atomic_read(&cpu->exit_request)) {
1873            DPRINTF("interrupt exit requested\n");
1874            /*
1875             * KVM requires us to reenter the kernel after IO exits to complete
1876             * instruction emulation. This self-signal will ensure that we
1877             * leave ASAP again.
1878             */
1879            kvm_cpu_kick_self();
1880        }
1881
1882        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
1883         * Matching barrier in kvm_eat_signals.
1884         */
1885        smp_rmb();
1886
1887        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
1888
1889        attrs = kvm_arch_post_run(cpu, run);
1890
1891#ifdef KVM_HAVE_MCE_INJECTION
1892        if (unlikely(have_sigbus_pending)) {
1893            qemu_mutex_lock_iothread();
1894            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
1895                                    pending_sigbus_addr);
1896            have_sigbus_pending = false;
1897            qemu_mutex_unlock_iothread();
1898        }
1899#endif
1900
1901        if (run_ret < 0) {
1902            if (run_ret == -EINTR || run_ret == -EAGAIN) {
1903                DPRINTF("io window exit\n");
1904                kvm_eat_signals(cpu);
1905                ret = EXCP_INTERRUPT;
1906                break;
1907            }
1908            fprintf(stderr, "error: kvm run failed %s\n",
1909                    strerror(-run_ret));
1910#ifdef TARGET_PPC
1911            if (run_ret == -EBUSY) {
1912                fprintf(stderr,
1913                        "This is probably because host SMT is enabled.\n"
1914                        "VCPUs can only run on primary threads with all "
1915                        "secondary threads offline.\n");
1916            }
1917#endif
1918            ret = -1;
1919            break;
1920        }
1921
1922        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
1923        switch (run->exit_reason) {
1924        case KVM_EXIT_IO:
1925            DPRINTF("handle_io\n");
1926            /* Called outside BQL */
1927            kvm_handle_io(run->io.port, attrs,
1928                          (uint8_t *)run + run->io.data_offset,
1929                          run->io.direction,
1930                          run->io.size,
1931                          run->io.count);
1932            ret = 0;
1933            break;
1934        case KVM_EXIT_MMIO:
1935            DPRINTF("handle_mmio\n");
1936            /* Called outside BQL */
1937            address_space_rw(&address_space_memory,
1938                             run->mmio.phys_addr, attrs,
1939                             run->mmio.data,
1940                             run->mmio.len,
1941                             run->mmio.is_write);
1942            ret = 0;
1943            break;
1944        case KVM_EXIT_IRQ_WINDOW_OPEN:
1945            DPRINTF("irq_window_open\n");
1946            ret = EXCP_INTERRUPT;
1947            break;
1948        case KVM_EXIT_SHUTDOWN:
1949            DPRINTF("shutdown\n");
1950            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
1951            ret = EXCP_INTERRUPT;
1952            break;
1953        case KVM_EXIT_UNKNOWN:
1954            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
1955                    (uint64_t)run->hw.hardware_exit_reason);
1956            ret = -1;
1957            break;
1958        case KVM_EXIT_INTERNAL_ERROR:
1959            ret = kvm_handle_internal_error(cpu, run);
1960            break;
1961        case KVM_EXIT_SYSTEM_EVENT:
1962            switch (run->system_event.type) {
1963            case KVM_SYSTEM_EVENT_SHUTDOWN:
1964                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
1965                ret = EXCP_INTERRUPT;
1966                break;
1967            case KVM_SYSTEM_EVENT_RESET:
1968                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
1969                ret = EXCP_INTERRUPT;
1970                break;
1971            case KVM_SYSTEM_EVENT_CRASH:
1972                kvm_cpu_synchronize_state(cpu);
1973                qemu_mutex_lock_iothread();
1974                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
1975                qemu_mutex_unlock_iothread();
1976                ret = 0;
1977                break;
1978            default:
1979                DPRINTF("kvm_arch_handle_exit\n");
1980                ret = kvm_arch_handle_exit(cpu, run);
1981                break;
1982            }
1983            break;
1984        default:
1985            DPRINTF("kvm_arch_handle_exit\n");
1986            ret = kvm_arch_handle_exit(cpu, run);
1987            break;
1988        }
1989    } while (ret == 0);
1990
1991    cpu_exec_end(cpu);
1992    qemu_mutex_lock_iothread();
1993
1994    if (ret < 0) {
1995        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
1996        vm_stop(RUN_STATE_INTERNAL_ERROR);
1997    }
1998
1999    atomic_set(&cpu->exit_request, 0);
2000    return ret;
2001}
2002
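    /*
     * ioctl wrappers: each takes at most one pointer argument and converts
     * the -1/errno failure convention into a negative errno return value.
     * kvm_ioctl() targets the /dev/kvm fd, kvm_vm_ioctl() the VM fd,
     * kvm_vcpu_ioctl() a vCPU fd and kvm_device_ioctl() an in-kernel
     * device fd.
     */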
2003int kvm_ioctl(KVMState *s, int type, ...)
2004{
2005    int ret;
2006    void *arg;
2007    va_list ap;
2008
2009    va_start(ap, type);
2010    arg = va_arg(ap, void *);
2011    va_end(ap);
2012
2013    trace_kvm_ioctl(type, arg);
2014    ret = ioctl(s->fd, type, arg);
2015    if (ret == -1) {
2016        ret = -errno;
2017    }
2018    return ret;
2019}
2020
2021int kvm_vm_ioctl(KVMState *s, int type, ...)
2022{
2023    int ret;
2024    void *arg;
2025    va_list ap;
2026
2027    va_start(ap, type);
2028    arg = va_arg(ap, void *);
2029    va_end(ap);
2030
2031    trace_kvm_vm_ioctl(type, arg);
2032    ret = ioctl(s->vmfd, type, arg);
2033    if (ret == -1) {
2034        ret = -errno;
2035    }
2036    return ret;
2037}
2038
2039int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
2040{
2041    int ret;
2042    void *arg;
2043    va_list ap;
2044
2045    va_start(ap, type);
2046    arg = va_arg(ap, void *);
2047    va_end(ap);
2048
2049    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
2050    ret = ioctl(cpu->kvm_fd, type, arg);
2051    if (ret == -1) {
2052        ret = -errno;
2053    }
2054    return ret;
2055}
2056
2057int kvm_device_ioctl(int fd, int type, ...)
2058{
2059    int ret;
2060    void *arg;
2061    va_list ap;
2062
2063    va_start(ap, type);
2064    arg = va_arg(ap, void *);
2065    va_end(ap);
2066
2067    trace_kvm_device_ioctl(fd, type, arg);
2068    ret = ioctl(fd, type, arg);
2069    if (ret == -1) {
2070        ret = -errno;
2071    }
2072    return ret;
2073}
2074
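    /* Return 1 if the VM supports the given device attribute, 0 otherwise
     * (including when the kernel lacks VM attribute support entirely).
     */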
2075int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
2076{
2077    int ret;
2078    struct kvm_device_attr attribute = {
2079        .group = group,
2080        .attr = attr,
2081    };
2082
2083    if (!kvm_vm_attributes_allowed) {
2084        return 0;
2085    }
2086
2087    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
2088    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
2089    return ret ? 0 : 1;
2090}
2091
2092int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
2093{
2094    struct kvm_device_attr attribute = {
2095        .group = group,
2096        .attr = attr,
2097        .flags = 0,
2098    };
2099
2100    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
2101}
2102
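    /*
     * Read or write a single device attribute.  @val points at the attribute
     * payload; on failure a negative errno is returned and @errp is set.
     */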
2103int kvm_device_access(int fd, int group, uint64_t attr,
2104                      void *val, bool write, Error **errp)
2105{
2106    struct kvm_device_attr kvmattr;
2107    int err;
2108
2109    kvmattr.flags = 0;
2110    kvmattr.group = group;
2111    kvmattr.attr = attr;
2112    kvmattr.addr = (uintptr_t)val;
2113
2114    err = kvm_device_ioctl(fd,
2115                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
2116                           &kvmattr);
2117    if (err < 0) {
2118        error_setg_errno(errp, -err,
2119                         "KVM_%s_DEVICE_ATTR failed: Group %d "
2120                         "attr 0x%016" PRIx64,
2121                         write ? "SET" : "GET", group, attr);
2122    }
2123    return err;
2124}
2125
2126bool kvm_has_sync_mmu(void)
2127{
2128    return kvm_state->sync_mmu;
2129}
2130
2131int kvm_has_vcpu_events(void)
2132{
2133    return kvm_state->vcpu_events;
2134}
2135
2136int kvm_has_robust_singlestep(void)
2137{
2138    return kvm_state->robust_singlestep;
2139}
2140
2141int kvm_has_debugregs(void)
2142{
2143    return kvm_state->debugregs;
2144}
2145
2146int kvm_has_many_ioeventfds(void)
2147{
2148    if (!kvm_enabled()) {
2149        return 0;
2150    }
2151    return kvm_state->many_ioeventfds;
2152}
2153
2154int kvm_has_gsi_routing(void)
2155{
2156#ifdef KVM_CAP_IRQ_ROUTING
2157    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
2158#else
2159    return false;
2160#endif
2161}
2162
2163int kvm_has_intx_set_mask(void)
2164{
2165    return kvm_state->intx_set_mask;
2166}
2167
2168bool kvm_arm_supports_user_irq(void)
2169{
2170    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
2171}
2172
2173#ifdef KVM_CAP_SET_GUEST_DEBUG
2174struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
2175                                                 target_ulong pc)
2176{
2177    struct kvm_sw_breakpoint *bp;
2178
2179    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
2180        if (bp->pc == pc) {
2181            return bp;
2182        }
2183    }
2184    return NULL;
2185}
2186
2187int kvm_sw_breakpoints_active(CPUState *cpu)
2188{
2189    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
2190}
2191
2192struct kvm_set_guest_debug_data {
2193    struct kvm_guest_debug dbg;
2194    int err;
2195};
2196
2197static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
2198{
2199    struct kvm_set_guest_debug_data *dbg_data =
2200        (struct kvm_set_guest_debug_data *) data.host_ptr;
2201
2202    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
2203                                   &dbg_data->dbg);
2204}
2205
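    /*
     * Rebuild the kvm_guest_debug control word (trap reinjection, single-step
     * and arch-specific breakpoint state) and install it on the vCPU thread
     * via KVM_SET_GUEST_DEBUG.
     */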
2206int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2207{
2208    struct kvm_set_guest_debug_data data;
2209
2210    data.dbg.control = reinject_trap;
2211
2212    if (cpu->singlestep_enabled) {
2213        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
2214    }
2215    kvm_arch_update_guest_debug(cpu, &data.dbg);
2216
2217    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
2218               RUN_ON_CPU_HOST_PTR(&data));
2219    return data.err;
2220}
2221
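    /*
     * Insert a gdbstub breakpoint.  Software breakpoints are reference
     * counted and patched into guest memory by the arch hook; hardware
     * breakpoints go straight to the arch code.  The guest debug state is
     * then refreshed on every vCPU.
     */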
2222int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2223                          target_ulong len, int type)
2224{
2225    struct kvm_sw_breakpoint *bp;
2226    int err;
2227
2228    if (type == GDB_BREAKPOINT_SW) {
2229        bp = kvm_find_sw_breakpoint(cpu, addr);
2230        if (bp) {
2231            bp->use_count++;
2232            return 0;
2233        }
2234
2235        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
2236        bp->pc = addr;
2237        bp->use_count = 1;
2238        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
2239        if (err) {
2240            g_free(bp);
2241            return err;
2242        }
2243
2244        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2245    } else {
2246        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
2247        if (err) {
2248            return err;
2249        }
2250    }
2251
2252    CPU_FOREACH(cpu) {
2253        err = kvm_update_guest_debug(cpu, 0);
2254        if (err) {
2255            return err;
2256        }
2257    }
2258    return 0;
2259}
2260
2261int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2262                          target_ulong len, int type)
2263{
2264    struct kvm_sw_breakpoint *bp;
2265    int err;
2266
2267    if (type == GDB_BREAKPOINT_SW) {
2268        bp = kvm_find_sw_breakpoint(cpu, addr);
2269        if (!bp) {
2270            return -ENOENT;
2271        }
2272
2273        if (bp->use_count > 1) {
2274            bp->use_count--;
2275            return 0;
2276        }
2277
2278        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
2279        if (err) {
2280            return err;
2281        }
2282
2283        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2284        g_free(bp);
2285    } else {
2286        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
2287        if (err) {
2288            return err;
2289        }
2290    }
2291
2292    CPU_FOREACH(cpu) {
2293        err = kvm_update_guest_debug(cpu, 0);
2294        if (err) {
2295            return err;
2296        }
2297    }
2298    return 0;
2299}
2300
2301void kvm_remove_all_breakpoints(CPUState *cpu)
2302{
2303    struct kvm_sw_breakpoint *bp, *next;
2304    KVMState *s = cpu->kvm_state;
2305    CPUState *tmpcpu;
2306
2307    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
2308        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
2309            /* Try harder to find a CPU that currently sees the breakpoint. */
2310            CPU_FOREACH(tmpcpu) {
2311                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
2312                    break;
2313                }
2314            }
2315        }
2316        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
2317        g_free(bp);
2318    }
2319    kvm_arch_remove_all_hw_breakpoints();
2320
2321    CPU_FOREACH(cpu) {
2322        kvm_update_guest_debug(cpu, 0);
2323    }
2324}
2325
2326#else /* !KVM_CAP_SET_GUEST_DEBUG */
2327
2328int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2329{
2330    return -EINVAL;
2331}
2332
2333int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2334                          target_ulong len, int type)
2335{
2336    return -EINVAL;
2337}
2338
2339int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2340                          target_ulong len, int type)
2341{
2342    return -EINVAL;
2343}
2344
2345void kvm_remove_all_breakpoints(CPUState *cpu)
2346{
2347}
2348#endif /* !KVM_CAP_SET_GUEST_DEBUG */
2349
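    /* Set the signal mask that the kernel applies while this vCPU sits in
     * KVM_RUN (KVM_SET_SIGNAL_MASK).
     */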
2350static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
2351{
2352    KVMState *s = kvm_state;
2353    struct kvm_signal_mask *sigmask;
2354    int r;
2355
2356    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
2357
2358    sigmask->len = s->sigmask_len;
2359    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
2360    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
2361    g_free(sigmask);
2362
2363    return r;
2364}
2365
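    /* SIG_IPI handler used with immediate_exit: simply flag an immediate
     * exit for the interrupted vCPU.
     */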
2366static void kvm_ipi_signal(int sig)
2367{
2368    if (current_cpu) {
2369        assert(kvm_immediate_exit);
2370        kvm_cpu_kick(current_cpu);
2371    }
2372}
2373
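    /*
     * Per-vCPU signal setup: install the SIG_IPI handler and unblock SIGBUS
     * for MCE forwarding where supported.  SIG_IPI is unblocked for the
     * whole thread when immediate_exit is available, otherwise only while
     * inside KVM_RUN via the per-vCPU signal mask.
     */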
2374void kvm_init_cpu_signals(CPUState *cpu)
2375{
2376    int r;
2377    sigset_t set;
2378    struct sigaction sigact;
2379
2380    memset(&sigact, 0, sizeof(sigact));
2381    sigact.sa_handler = kvm_ipi_signal;
2382    sigaction(SIG_IPI, &sigact, NULL);
2383
2384    pthread_sigmask(SIG_BLOCK, NULL, &set);
2385#if defined KVM_HAVE_MCE_INJECTION
2386    sigdelset(&set, SIGBUS);
2387    pthread_sigmask(SIG_SETMASK, &set, NULL);
2388#endif
2389    sigdelset(&set, SIG_IPI);
2390    if (kvm_immediate_exit) {
2391        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
2392    } else {
2393        r = kvm_set_signal_mask(cpu, &set);
2394    }
2395    if (r) {
2396        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
2397        exit(1);
2398    }
2399}
2400
2401/* Called asynchronously in VCPU thread.  */
2402int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
2403{
2404#ifdef KVM_HAVE_MCE_INJECTION
2405    if (have_sigbus_pending) {
2406        return 1;
2407    }
2408    have_sigbus_pending = true;
2409    pending_sigbus_addr = addr;
2410    pending_sigbus_code = code;
2411    atomic_set(&cpu->exit_request, 1);
2412    return 0;
2413#else
2414    return 1;
2415#endif
2416}
2417
2418/* Called synchronously (via signalfd) in main thread.  */
2419int kvm_on_sigbus(int code, void *addr)
2420{
2421#ifdef KVM_HAVE_MCE_INJECTION
2422    /* Action required MCE kills the process if SIGBUS is blocked.  Because
2423     * that's what happens in the I/O thread, where we handle MCE via signalfd,
2424     * we can only get action optional here.
2425     */
2426    assert(code != BUS_MCEERR_AR);
2427    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
2428    return 0;
2429#else
2430    return 1;
2431#endif
2432}
2433
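    /*
     * Create an in-kernel device of @type and return its fd, or only probe
     * for support when @test is true (0 on success).  Returns a negative
     * errno on failure.
     */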
2434int kvm_create_device(KVMState *s, uint64_t type, bool test)
2435{
2436    int ret;
2437    struct kvm_create_device create_dev;
2438
2439    create_dev.type = type;
2440    create_dev.fd = -1;
2441    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
2442
2443    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
2444        return -ENOTSUP;
2445    }
2446
2447    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
2448    if (ret) {
2449        return ret;
2450    }
2451
2452    return test ? 0 : create_dev.fd;
2453}
2454
2455bool kvm_device_supported(int vmfd, uint64_t type)
2456{
2457    struct kvm_create_device create_dev = {
2458        .type = type,
2459        .fd = -1,
2460        .flags = KVM_CREATE_DEVICE_TEST,
2461    };
2462
2463    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
2464        return false;
2465    }
2466
2467    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
2468}
2469
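    /* Write (set) or read (get) a single architecture register through the
     * KVM_{SET,GET}_ONE_REG interface.
     */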
2470int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
2471{
2472    struct kvm_one_reg reg;
2473    int r;
2474
2475    reg.id = id;
2476    reg.addr = (uintptr_t) source;
2477    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
2478    if (r) {
2479        trace_kvm_failed_reg_set(id, strerror(-r));
2480    }
2481    return r;
2482}
2483
2484int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
2485{
2486    struct kvm_one_reg reg;
2487    int r;
2488
2489    reg.id = id;
2490    reg.addr = (uintptr_t) target;
2491    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
2492    if (r) {
2493        trace_kvm_failed_reg_get(id, strerror(-r));
2494    }
2495    return r;
2496}
2497
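    /* QOM glue: register KVM as an accelerator type. */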
2498static void kvm_accel_class_init(ObjectClass *oc, void *data)
2499{
2500    AccelClass *ac = ACCEL_CLASS(oc);
2501    ac->name = "KVM";
2502    ac->init_machine = kvm_init;
2503    ac->allowed = &kvm_allowed;
2504}
2505
2506static const TypeInfo kvm_accel_type = {
2507    .name = TYPE_KVM_ACCEL,
2508    .parent = TYPE_ACCEL,
2509    .class_init = kvm_accel_class_init,
2510    .instance_size = sizeof(KVMState),
2511};
2512
2513static void kvm_type_init(void)
2514{
2515    type_register_static(&kvm_accel_type);
2516}
2517
2518type_init(kvm_type_init);
2519