qemu/accel/kvm/kvm-all.c
   1/*
   2 * QEMU KVM support
   3 *
   4 * Copyright IBM, Corp. 2008
   5 *           Red Hat, Inc. 2008
   6 *
   7 * Authors:
   8 *  Anthony Liguori   <aliguori@us.ibm.com>
   9 *  Glauber Costa     <gcosta@redhat.com>
  10 *
  11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  12 * See the COPYING file in the top-level directory.
  13 *
  14 */
  15
  16#include "qemu/osdep.h"
  17#include <sys/ioctl.h>
  18#include <poll.h>
  19
  20#include <linux/kvm.h>
  21
  22#include "qemu/atomic.h"
  23#include "qemu/option.h"
  24#include "qemu/config-file.h"
  25#include "qemu/error-report.h"
  26#include "qapi/error.h"
  27#include "hw/pci/msi.h"
  28#include "hw/pci/msix.h"
  29#include "hw/s390x/adapter.h"
  30#include "exec/gdbstub.h"
  31#include "sysemu/kvm_int.h"
  32#include "sysemu/runstate.h"
  33#include "sysemu/cpus.h"
  34#include "qemu/bswap.h"
  35#include "exec/memory.h"
  36#include "exec/ram_addr.h"
  37#include "qemu/event_notifier.h"
  38#include "qemu/main-loop.h"
  39#include "trace.h"
  40#include "hw/irq.h"
  41#include "qapi/visitor.h"
  42#include "qapi/qapi-types-common.h"
  43#include "qapi/qapi-visit-common.h"
  44#include "sysemu/reset.h"
  45#include "qemu/guest-random.h"
  46#include "sysemu/hw_accel.h"
  47#include "kvm-cpus.h"
  48
  49#include "hw/boards.h"
  50
  51/* This check must be after config-host.h is included */
  52#ifdef CONFIG_EVENTFD
  53#include <sys/eventfd.h>
  54#endif
  55
  56/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
  57 * need to use the real host PAGE_SIZE, as that's what KVM will use.
  58 */
  59#ifdef PAGE_SIZE
  60#undef PAGE_SIZE
  61#endif
  62#define PAGE_SIZE qemu_real_host_page_size
  63
  64//#define DEBUG_KVM
  65
  66#ifdef DEBUG_KVM
  67#define DPRINTF(fmt, ...) \
  68    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
  69#else
  70#define DPRINTF(fmt, ...) \
  71    do { } while (0)
  72#endif
  73
  74#define KVM_MSI_HASHTAB_SIZE    256
  75
  76struct KVMParkedVcpu {
  77    unsigned long vcpu_id;
  78    int kvm_fd;
  79    QLIST_ENTRY(KVMParkedVcpu) node;
  80};
  81
  82enum KVMDirtyRingReaperState {
  83    KVM_DIRTY_RING_REAPER_NONE = 0,
  84    /* The reaper is sleeping */
  85    KVM_DIRTY_RING_REAPER_WAIT,
  86    /* The reaper is reaping dirty pages */
  87    KVM_DIRTY_RING_REAPER_REAPING,
  88};
  89
  90/*
  91 * KVM reaper instance, responsible for collecting the KVM dirty bits
  92 * via the dirty ring.
  93 */
  94struct KVMDirtyRingReaper {
  95    /* The reaper thread */
  96    QemuThread reaper_thr;
  97    volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
  98    volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
  99};
 100
 101struct KVMState
 102{
 103    AccelState parent_obj;
 104
 105    int nr_slots;
 106    int fd;
 107    int vmfd;
 108    int coalesced_mmio;
 109    int coalesced_pio;
 110    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
 111    bool coalesced_flush_in_progress;
 112    int vcpu_events;
 113    int robust_singlestep;
 114    int debugregs;
 115#ifdef KVM_CAP_SET_GUEST_DEBUG
 116    QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
 117#endif
 118    int max_nested_state_len;
 119    int many_ioeventfds;
 120    int intx_set_mask;
 121    int kvm_shadow_mem;
 122    bool kernel_irqchip_allowed;
 123    bool kernel_irqchip_required;
 124    OnOffAuto kernel_irqchip_split;
 125    bool sync_mmu;
 126    uint64_t manual_dirty_log_protect;
 127    /* The man page (and POSIX) say ioctl numbers are signed int, but
 128     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
 129     * unsigned, and treating them as signed here can break things */
 130    unsigned irq_set_ioctl;
 131    unsigned int sigmask_len;
 132    GHashTable *gsimap;
 133#ifdef KVM_CAP_IRQ_ROUTING
 134    struct kvm_irq_routing *irq_routes;
 135    int nr_allocated_irq_routes;
 136    unsigned long *used_gsi_bitmap;
 137    unsigned int gsi_count;
 138    QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
 139#endif
 140    KVMMemoryListener memory_listener;
 141    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
 142
 143    /* For "info mtree -f" to tell if an MR is registered in KVM */
 144    int nr_as;
 145    struct KVMAs {
 146        KVMMemoryListener *ml;
 147        AddressSpace *as;
 148    } *as;
 149    uint64_t kvm_dirty_ring_bytes;  /* Size of the per-vcpu dirty ring */
 150    uint32_t kvm_dirty_ring_size;   /* Number of dirty GFNs per ring */
 151    struct KVMDirtyRingReaper reaper;
 152};
 153
 154KVMState *kvm_state;
 155bool kvm_kernel_irqchip;
 156bool kvm_split_irqchip;
 157bool kvm_async_interrupts_allowed;
 158bool kvm_halt_in_kernel_allowed;
 159bool kvm_eventfds_allowed;
 160bool kvm_irqfds_allowed;
 161bool kvm_resamplefds_allowed;
 162bool kvm_msi_via_irqfd_allowed;
 163bool kvm_gsi_routing_allowed;
 164bool kvm_gsi_direct_mapping;
 165bool kvm_allowed;
 166bool kvm_readonly_mem_allowed;
 167bool kvm_vm_attributes_allowed;
 168bool kvm_direct_msi_allowed;
 169bool kvm_ioeventfd_any_length_allowed;
 170bool kvm_msi_use_devid;
 171static bool kvm_immediate_exit;
 172static hwaddr kvm_max_slot_size = ~0;
 173
 174static const KVMCapabilityInfo kvm_required_capabilites[] = {
 175    KVM_CAP_INFO(USER_MEMORY),
 176    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
 177    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
 178    KVM_CAP_LAST_INFO
 179};
 180
 181static NotifierList kvm_irqchip_change_notifiers =
 182    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
 183
 184struct KVMResampleFd {
 185    int gsi;
 186    EventNotifier *resample_event;
 187    QLIST_ENTRY(KVMResampleFd) node;
 188};
 189typedef struct KVMResampleFd KVMResampleFd;
 190
 191/*
 192 * Only used with split irqchip where we need to do the resample fd
 193 * kick for the kernel from userspace.
 194 */
 195static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
 196    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
 197
 198static QemuMutex kml_slots_lock;
 199
 200#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
 201#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)
 202
 203static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
 204
 205static inline void kvm_resample_fd_remove(int gsi)
 206{
 207    KVMResampleFd *rfd;
 208
 209    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
 210        if (rfd->gsi == gsi) {
 211            QLIST_REMOVE(rfd, node);
 212            g_free(rfd);
 213            break;
 214        }
 215    }
 216}
 217
 218static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
 219{
 220    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);
 221
 222    rfd->gsi = gsi;
 223    rfd->resample_event = event;
 224
 225    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
 226}
 227
 228void kvm_resample_fd_notify(int gsi)
 229{
 230    KVMResampleFd *rfd;
 231
 232    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
 233        if (rfd->gsi == gsi) {
 234            event_notifier_set(rfd->resample_event);
 235            trace_kvm_resample_fd_notify(gsi);
 236            return;
 237        }
 238    }
 239}
 240
 241int kvm_get_max_memslots(void)
 242{
 243    KVMState *s = KVM_STATE(current_accel());
 244
 245    return s->nr_slots;
 246}
 247
 248/* Called with KVMMemoryListener.slots_lock held */
 249static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 250{
 251    KVMState *s = kvm_state;
 252    int i;
 253
 254    for (i = 0; i < s->nr_slots; i++) {
 255        if (kml->slots[i].memory_size == 0) {
 256            return &kml->slots[i];
 257        }
 258    }
 259
 260    return NULL;
 261}
 262
 263bool kvm_has_free_slot(MachineState *ms)
 264{
 265    KVMState *s = KVM_STATE(ms->accelerator);
 266    bool result;
 267    KVMMemoryListener *kml = &s->memory_listener;
 268
 269    kvm_slots_lock();
 270    result = !!kvm_get_free_slot(kml);
 271    kvm_slots_unlock();
 272
 273    return result;
 274}
 275
 276/* Called with KVMMemoryListener.slots_lock held */
 277static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
 278{
 279    KVMSlot *slot = kvm_get_free_slot(kml);
 280
 281    if (slot) {
 282        return slot;
 283    }
 284
 285    fprintf(stderr, "%s: no free slot available\n", __func__);
 286    abort();
 287}
 288
 289static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
 290                                         hwaddr start_addr,
 291                                         hwaddr size)
 292{
 293    KVMState *s = kvm_state;
 294    int i;
 295
 296    for (i = 0; i < s->nr_slots; i++) {
 297        KVMSlot *mem = &kml->slots[i];
 298
 299        if (start_addr == mem->start_addr && size == mem->memory_size) {
 300            return mem;
 301        }
 302    }
 303
 304    return NULL;
 305}
 306
 307/*
 308 * Calculate and align the start address and the size of the section.
 309 * Return the size. If the size is 0, the aligned section is empty.
 310 */
 311static hwaddr kvm_align_section(MemoryRegionSection *section,
 312                                hwaddr *start)
 313{
 314    hwaddr size = int128_get64(section->size);
 315    hwaddr delta, aligned;
 316
 317    /* kvm works in page size chunks, but the function may be called
 318       with a sub-page size and unaligned start address. Pad the start
 319       address to the next and truncate size to the previous page boundary. */
 320    aligned = ROUND_UP(section->offset_within_address_space,
 321                       qemu_real_host_page_size);
 322    delta = aligned - section->offset_within_address_space;
 323    *start = aligned;
 324    if (delta > size) {
 325        return 0;
 326    }
 327
 328    return (size - delta) & qemu_real_host_page_mask;
 329}
 330
 331int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
 332                                       hwaddr *phys_addr)
 333{
 334    KVMMemoryListener *kml = &s->memory_listener;
 335    int i, ret = 0;
 336
 337    kvm_slots_lock();
 338    for (i = 0; i < s->nr_slots; i++) {
 339        KVMSlot *mem = &kml->slots[i];
 340
 341        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
 342            *phys_addr = mem->start_addr + (ram - mem->ram);
 343            ret = 1;
 344            break;
 345        }
 346    }
 347    kvm_slots_unlock();
 348
 349    return ret;
 350}
 351
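    /*
     * Update a single KVM memslot via KVM_SET_USER_MEMORY_REGION.  Toggling
     * KVM_MEM_READONLY on an existing slot first deletes the slot (size 0)
     * and then re-creates it, as required since KVM commit 75d61fbc.
     */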
 352static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
 353{
 354    KVMState *s = kvm_state;
 355    struct kvm_userspace_memory_region mem;
 356    int ret;
 357
 358    mem.slot = slot->slot | (kml->as_id << 16);
 359    mem.guest_phys_addr = slot->start_addr;
 360    mem.userspace_addr = (unsigned long)slot->ram;
 361    mem.flags = slot->flags;
 362
 363    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
 364        /* Set the slot size to 0 before setting the slot to the desired
 365         * value. This is needed based on KVM commit 75d61fbc. */
 366        mem.memory_size = 0;
 367        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 368        if (ret < 0) {
 369            goto err;
 370        }
 371    }
 372    mem.memory_size = slot->memory_size;
 373    ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 374    slot->old_flags = mem.flags;
 375err:
 376    trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
 377                              mem.memory_size, mem.userspace_addr, ret);
 378    if (ret < 0) {
 379        error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
 380                     " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
 381                     __func__, mem.slot, slot->start_addr,
 382                     (uint64_t)mem.memory_size, strerror(errno));
 383    }
 384    return ret;
 385}
 386
 387static int do_kvm_destroy_vcpu(CPUState *cpu)
 388{
 389    KVMState *s = kvm_state;
 390    long mmap_size;
 391    struct KVMParkedVcpu *vcpu = NULL;
 392    int ret = 0;
 393
 394    DPRINTF("kvm_destroy_vcpu\n");
 395
 396    ret = kvm_arch_destroy_vcpu(cpu);
 397    if (ret < 0) {
 398        goto err;
 399    }
 400
 401    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 402    if (mmap_size < 0) {
 403        ret = mmap_size;
 404        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
 405        goto err;
 406    }
 407
 408    ret = munmap(cpu->kvm_run, mmap_size);
 409    if (ret < 0) {
 410        goto err;
 411    }
 412
 413    if (cpu->kvm_dirty_gfns) {
 414        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
 415        if (ret < 0) {
 416            goto err;
 417        }
 418    }
 419
 420    vcpu = g_malloc0(sizeof(*vcpu));
 421    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
 422    vcpu->kvm_fd = cpu->kvm_fd;
 423    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
 424err:
 425    return ret;
 426}
 427
 428void kvm_destroy_vcpu(CPUState *cpu)
 429{
 430    if (do_kvm_destroy_vcpu(cpu) < 0) {
 431        error_report("kvm_destroy_vcpu failed");
 432        exit(EXIT_FAILURE);
 433    }
 434}
 435
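    /*
     * Return a vcpu fd for @vcpu_id: reuse a previously parked vcpu fd if
     * one exists (see do_kvm_destroy_vcpu()), otherwise ask KVM to create
     * a new vcpu.
     */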
 436static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
 437{
 438    struct KVMParkedVcpu *cpu;
 439
 440    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
 441        if (cpu->vcpu_id == vcpu_id) {
 442            int kvm_fd;
 443
 444            QLIST_REMOVE(cpu, node);
 445            kvm_fd = cpu->kvm_fd;
 446            g_free(cpu);
 447            return kvm_fd;
 448        }
 449    }
 450
 451    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
 452}
 453
 454int kvm_init_vcpu(CPUState *cpu, Error **errp)
 455{
 456    KVMState *s = kvm_state;
 457    long mmap_size;
 458    int ret;
 459
 460    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
 461
 462    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
 463    if (ret < 0) {
 464        error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
 465                         kvm_arch_vcpu_id(cpu));
 466        goto err;
 467    }
 468
 469    cpu->kvm_fd = ret;
 470    cpu->kvm_state = s;
 471    cpu->vcpu_dirty = true;
 472    cpu->dirty_pages = 0;
 473
 474    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 475    if (mmap_size < 0) {
 476        ret = mmap_size;
 477        error_setg_errno(errp, -mmap_size,
 478                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
 479        goto err;
 480    }
 481
 482    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 483                        cpu->kvm_fd, 0);
 484    if (cpu->kvm_run == MAP_FAILED) {
 485        ret = -errno;
 486        error_setg_errno(errp, ret,
 487                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
 488                         kvm_arch_vcpu_id(cpu));
 489        goto err;
 490    }
 491
 492    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
 493        s->coalesced_mmio_ring =
 494            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
 495    }
 496
 497    if (s->kvm_dirty_ring_size) {
 498        /* Use MAP_SHARED to share pages with the kernel */
 499        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
 500                                   PROT_READ | PROT_WRITE, MAP_SHARED,
 501                                   cpu->kvm_fd,
 502                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
 503        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
 504            ret = -errno;
 505            DPRINTF("mmap'ing vcpu dirty gfns failed: %d\n", ret);
 506            goto err;
 507        }
 508    }
 509
 510    ret = kvm_arch_init_vcpu(cpu);
 511    if (ret < 0) {
 512        error_setg_errno(errp, -ret,
 513                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
 514                         kvm_arch_vcpu_id(cpu));
 515    }
 516err:
 517    return ret;
 518}
 519
 520/*
 521 * dirty pages logging control
 522 */
 523
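    /*
     * Compute the KVM memslot flags for a memory region: request dirty
     * logging when any dirty-log client is active, and mark the slot
     * read-only when the region is ROM/ROMD and KVM supports it.
     */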
 524static int kvm_mem_flags(MemoryRegion *mr)
 525{
 526    bool readonly = mr->readonly || memory_region_is_romd(mr);
 527    int flags = 0;
 528
 529    if (memory_region_get_dirty_log_mask(mr) != 0) {
 530        flags |= KVM_MEM_LOG_DIRTY_PAGES;
 531    }
 532    if (readonly && kvm_readonly_mem_allowed) {
 533        flags |= KVM_MEM_READONLY;
 534    }
 535    return flags;
 536}
 537
 538/* Called with KVMMemoryListener.slots_lock held */
 539static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
 540                                 MemoryRegion *mr)
 541{
 542    mem->flags = kvm_mem_flags(mr);
 543
 544    /* If nothing changed effectively, no need to issue ioctl */
 545    if (mem->flags == mem->old_flags) {
 546        return 0;
 547    }
 548
 549    kvm_slot_init_dirty_bitmap(mem);
 550    return kvm_set_user_memory_region(kml, mem, false);
 551}
 552
 553static int kvm_section_update_flags(KVMMemoryListener *kml,
 554                                    MemoryRegionSection *section)
 555{
 556    hwaddr start_addr, size, slot_size;
 557    KVMSlot *mem;
 558    int ret = 0;
 559
 560    size = kvm_align_section(section, &start_addr);
 561    if (!size) {
 562        return 0;
 563    }
 564
 565    kvm_slots_lock();
 566
 567    while (size && !ret) {
 568        slot_size = MIN(kvm_max_slot_size, size);
 569        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
 570        if (!mem) {
 571            /* We don't have a slot if we want to trap every access. */
 572            goto out;
 573        }
 574
 575        ret = kvm_slot_update_flags(kml, mem, section->mr);
 576        start_addr += slot_size;
 577        size -= slot_size;
 578    }
 579
 580out:
 581    kvm_slots_unlock();
 582    return ret;
 583}
 584
 585static void kvm_log_start(MemoryListener *listener,
 586                          MemoryRegionSection *section,
 587                          int old, int new)
 588{
 589    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 590    int r;
 591
 592    if (old != 0) {
 593        return;
 594    }
 595
 596    r = kvm_section_update_flags(kml, section);
 597    if (r < 0) {
 598        abort();
 599    }
 600}
 601
 602static void kvm_log_stop(MemoryListener *listener,
 603                          MemoryRegionSection *section,
 604                          int old, int new)
 605{
 606    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 607    int r;
 608
 609    if (new != 0) {
 610        return;
 611    }
 612
 613    r = kvm_section_update_flags(kml, section);
 614    if (r < 0) {
 615        abort();
 616    }
 617}
 618
 619/* get kvm's dirty pages bitmap and update qemu's */
 620static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
 621{
 622    ram_addr_t start = slot->ram_start_offset;
 623    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size;
 624
 625    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
 626}
 627
 628static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
 629{
 630    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
 631}
 632
 633#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
 634
 635/* Allocate the dirty bitmap for a slot  */
 636static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
 637{
 638    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
 639        return;
 640    }
 641
 642    /*
 643     * XXX bad kernel interface alert
 644     * For the dirty bitmap, the kernel allocates an array of a size
 645     * aligned to bits-per-long.  But when the kernel is 64-bit and
 646     * userspace is 32-bit, userspace can't align to the same
 647     * bits-per-long, since sizeof(long) differs between kernel and
 648     * user space.  Userspace would then provide a buffer which
 649     * may be 4 bytes smaller than the kernel uses, resulting in
 650     * userspace memory corruption (which is not detectable by valgrind
 651     * either, in most cases).
 652     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
 653     * the hope that sizeof(long) won't become >8 any time soon.
 654     *
 655     * Note: the granule of kvm dirty log is qemu_real_host_page_size.
 656     * And mem->memory_size is aligned to it (otherwise this mem can't
 657     * be registered to KVM).
 658     */
 659    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size,
 660                                        /*HOST_LONG_BITS*/ 64) / 8;
 661    mem->dirty_bmap = g_malloc0(bitmap_size);
 662    mem->dirty_bmap_size = bitmap_size;
 663}
 664
 665/*
 666 * Sync the dirty bitmap from the kernel into KVMSlot.dirty_bmap; return
 667 * true on success, false otherwise.
 668 */
 669static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
 670{
 671    struct kvm_dirty_log d = {};
 672    int ret;
 673
 674    d.dirty_bitmap = slot->dirty_bmap;
 675    d.slot = slot->slot | (slot->as_id << 16);
 676    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);
 677
 678    if (ret == -ENOENT) {
 679        /* kernel does not have dirty bitmap in this slot */
 680        ret = 0;
 681    }
 682    if (ret) {
 683        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
 684                          __func__, ret);
 685    }
 686    return ret == 0;
 687}
 688
 689/* Must be called with the slots_lock held for all address spaces. */
 690static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
 691                                     uint32_t slot_id, uint64_t offset)
 692{
 693    KVMMemoryListener *kml;
 694    KVMSlot *mem;
 695
 696    if (as_id >= s->nr_as) {
 697        return;
 698    }
 699
 700    kml = s->as[as_id].ml;
 701    mem = &kml->slots[slot_id];
 702
 703    if (!mem->memory_size || offset >=
 704        (mem->memory_size / qemu_real_host_page_size)) {
 705        return;
 706    }
 707
 708    set_bit(offset, mem->dirty_bmap);
 709}
 710
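    /*
     * Each entry of the per-vcpu dirty ring carries a flags field: the
     * kernel sets KVM_DIRTY_GFN_F_DIRTY when publishing a dirty page, and
     * userspace sets KVM_DIRTY_GFN_F_RESET once the entry is collected so
     * that KVM_RESET_DIRTY_RINGS can re-protect the page.
     */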
 711static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
 712{
 713    return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
 714}
 715
 716static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
 717{
 718    gfn->flags = KVM_DIRTY_GFN_F_RESET;
 719}
 720
 721/*
 722 * Must be called with the slots_lock held for all address spaces.  Returns
 723 * the number of dirty pages collected from this vcpu's dirty ring.
 724 */
 725static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
 726{
 727    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
 728    uint32_t ring_size = s->kvm_dirty_ring_size;
 729    uint32_t count = 0, fetch = cpu->kvm_fetch_index;
 730
 731    assert(dirty_gfns && ring_size);
 732    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);
 733
 734    while (true) {
 735        cur = &dirty_gfns[fetch % ring_size];
 736        if (!dirty_gfn_is_dirtied(cur)) {
 737            break;
 738        }
 739        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
 740                                 cur->offset);
 741        dirty_gfn_set_collected(cur);
 742        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
 743        fetch++;
 744        count++;
 745    }
 746    cpu->kvm_fetch_index = fetch;
 747    cpu->dirty_pages += count;
 748
 749    return count;
 750}
 751
 752/* Must be with slots_lock held */
 753static uint64_t kvm_dirty_ring_reap_locked(KVMState *s)
 754{
 755    int ret;
 756    CPUState *cpu;
 757    uint64_t total = 0;
 758    int64_t stamp;
 759
 760    stamp = get_clock();
 761
 762    CPU_FOREACH(cpu) {
 763        total += kvm_dirty_ring_reap_one(s, cpu);
 764    }
 765
 766    if (total) {
 767        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
 768        assert(ret == total);
 769    }
 770
 771    stamp = get_clock() - stamp;
 772
 773    if (total) {
 774        trace_kvm_dirty_ring_reap(total, stamp / 1000);
 775    }
 776
 777    return total;
 778}
 779
 780/*
 781 * Currently, for simplicity, we must hold the BQL before calling this.  We can
 782 * consider dropping the BQL once we are confident about all the race conditions.
 783 */
 784static uint64_t kvm_dirty_ring_reap(KVMState *s)
 785{
 786    uint64_t total;
 787
 788    /*
 789     * We need to lock all kvm slots for all address spaces here,
 790     * because:
 791     *
 792     * (1) We need to mark dirty for dirty bitmaps in multiple slots
 793     *     and for tons of pages, so it's better to take the lock here
 794     *     once rather than once per page.  And more importantly,
 795     *
 796     * (2) We must _NOT_ publish dirty bits to the other threads
 797     *     (e.g., the migration thread) via the kvm memory slot dirty
 798     *     bitmaps before correctly re-protecting those dirtied pages.
 799     *     Otherwise we run the risk of data corruption if the page
 800     *     data is read in another thread before we do the
 801     *     reset below.
 802     */
 803    kvm_slots_lock();
 804    total = kvm_dirty_ring_reap_locked(s);
 805    kvm_slots_unlock();
 806
 807    return total;
 808}
 809
 810static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
 811{
 812    /* No need to do anything */
 813}
 814
 815/*
 816 * Kick all vcpus out in a synchronized way.  When this returns, we
 817 * guarantee that every vcpu has been kicked and has returned to
 818 * userspace at least once.
 819 */
 820static void kvm_cpu_synchronize_kick_all(void)
 821{
 822    CPUState *cpu;
 823
 824    CPU_FOREACH(cpu) {
 825        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
 826    }
 827}
 828
 829/*
 830 * Flush all the existing dirty pages to the KVM slot buffers.  When
 831 * this call returns, we guarantee that all the touched dirty pages
 832 * before calling this function have been put into the per-kvmslot
 833 * dirty bitmap.
 834 *
 835 * This function must be called with BQL held.
 836 */
 837static void kvm_dirty_ring_flush(void)
 838{
 839    trace_kvm_dirty_ring_flush(0);
 840    /*
 841     * The function needs to be serialized.  Since this function
 842     * should always be with BQL held, serialization is guaranteed.
 843     * However, let's be sure of it.
 844     */
 845    assert(qemu_mutex_iothread_locked());
 846    /*
 847     * First make sure to flush the hardware buffers by kicking all
 848     * vcpus out in a synchronous way.
 849     */
 850    kvm_cpu_synchronize_kick_all();
 851    kvm_dirty_ring_reap(kvm_state);
 852    trace_kvm_dirty_ring_flush(1);
 853}
 854
 855/**
 856 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 857 *
 858 * This function will first try to fetch the dirty bitmap from the kernel,
 859 * and then update qemu's dirty bitmap.
 860 *
 861 * NOTE: the caller must hold kml->slots_lock.
 862 *
 863 * @kml: the KVM memory listener object
 864 * @section: the memory section to sync the dirty bitmap with
 865 */
 866static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
 867                                           MemoryRegionSection *section)
 868{
 869    KVMState *s = kvm_state;
 870    KVMSlot *mem;
 871    hwaddr start_addr, size;
 872    hwaddr slot_size;
 873
 874    size = kvm_align_section(section, &start_addr);
 875    while (size) {
 876        slot_size = MIN(kvm_max_slot_size, size);
 877        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
 878        if (!mem) {
 879            /* We don't have a slot if we want to trap every access. */
 880            return;
 881        }
 882        if (kvm_slot_get_dirty_log(s, mem)) {
 883            kvm_slot_sync_dirty_pages(mem);
 884        }
 885        start_addr += slot_size;
 886        size -= slot_size;
 887    }
 888}
 889
 890/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
 891#define KVM_CLEAR_LOG_SHIFT  6
 892#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
 893#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
 894
 895static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
 896                                  uint64_t size)
 897{
 898    KVMState *s = kvm_state;
 899    uint64_t end, bmap_start, start_delta, bmap_npages;
 900    struct kvm_clear_dirty_log d;
 901    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
 902    int ret;
 903
 904    /*
 905     * We need to extend either the start or the size or both to
 906     * satisfy the KVM interface requirement.  First, align the start
 907     * address down to a 64-host-page boundary.
 908     */
 909    bmap_start = start & KVM_CLEAR_LOG_MASK;
 910    start_delta = start - bmap_start;
 911    bmap_start /= psize;
 912
 913    /*
 914     * The kernel interface has restriction on the size too, that either:
 915     *
 916     * (1) the size is 64 host pages aligned (just like the start), or
 917     * (2) the size fills up until the end of the KVM memslot.
 918     */
 919    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
 920        << KVM_CLEAR_LOG_SHIFT;
 921    end = mem->memory_size / psize;
 922    if (bmap_npages > end - bmap_start) {
 923        bmap_npages = end - bmap_start;
 924    }
 925    start_delta /= psize;
 926
 927    /*
 928     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
 929     * that we won't clear any unknown dirty bits; otherwise we might
 930     * accidentally clear some set bits which are not yet synced from
 931     * the kernel into QEMU's bitmap, and we'd lose track of the
 932     * guest modifications to those pages (which can directly lead
 933     * to guest data loss or a panic after migration).
 934     *
 935     * Layout of the KVMSlot.dirty_bmap:
 936     *
 937     *                   |<-------- bmap_npages -----------..>|
 938     *                                                     [1]
 939     *                     start_delta         size
 940     *  |----------------|-------------|------------------|------------|
 941     *  ^                ^             ^                               ^
 942     *  |                |             |                               |
 943     * start          bmap_start     (start)                         end
 944     * of memslot                                             of memslot
 945     *
 946     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
 947     */
 948
 949    assert(bmap_start % BITS_PER_LONG == 0);
 950    /* We should never do log_clear before log_sync */
 951    assert(mem->dirty_bmap);
 952    if (start_delta || bmap_npages - size / psize) {
 953        /* Slow path - we need to manipulate a temp bitmap */
 954        bmap_clear = bitmap_new(bmap_npages);
 955        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
 956                                    bmap_start, start_delta + size / psize);
 957        /*
 958         * We need to clear the holes at the start because they were not
 959         * specified by the caller and we extended the bitmap only for
 960         * the 64-page alignment.
 961         */
 962        bitmap_clear(bmap_clear, 0, start_delta);
 963        d.dirty_bitmap = bmap_clear;
 964    } else {
 965        /*
 966         * Fast path - both start and size align well with BITS_PER_LONG
 967         * (or the end of memory slot)
 968         */
 969        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
 970    }
 971
 972    d.first_page = bmap_start;
 973    /* It should never overflow.  If it happens, say something */
 974    assert(bmap_npages <= UINT32_MAX);
 975    d.num_pages = bmap_npages;
 976    d.slot = mem->slot | (as_id << 16);
 977
 978    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
 979    if (ret < 0 && ret != -ENOENT) {
 980        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
 981                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
 982                     __func__, d.slot, (uint64_t)d.first_page,
 983                     (uint32_t)d.num_pages, ret);
 984    } else {
 985        ret = 0;
 986        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
 987    }
 988
 989    /*
 990     * After we have updated the remote dirty bitmap, we also update the
 991     * cached bitmap for the memslot; then, if another user clears the
 992     * same region, we know we shouldn't clear it again on the remote
 993     * side, since that would also mean data loss.
 994     */
 995    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
 996                 size / psize);
 997    /* This handles the NULL case well */
 998    g_free(bmap_clear);
 999    return ret;
1000}
1001
1002
1003/**
1004 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
1005 *
1006 * NOTE: this will be a no-op if we haven't enabled manual dirty log
1007 * protection in the host kernel because in that case this operation
1008 * will be done within log_sync().
1009 *
1010 * @kml:     the kvm memory listener
1011 * @section: the memory range to clear dirty bitmap
1012 */
1013static int kvm_physical_log_clear(KVMMemoryListener *kml,
1014                                  MemoryRegionSection *section)
1015{
1016    KVMState *s = kvm_state;
1017    uint64_t start, size, offset, count;
1018    KVMSlot *mem;
1019    int ret = 0, i;
1020
1021    if (!s->manual_dirty_log_protect) {
1022        /* No need to do explicit clear */
1023        return ret;
1024    }
1025
1026    start = section->offset_within_address_space;
1027    size = int128_get64(section->size);
1028
1029    if (!size) {
1030        /* Nothing more we can do... */
1031        return ret;
1032    }
1033
1034    kvm_slots_lock();
1035
1036    for (i = 0; i < s->nr_slots; i++) {
1037        mem = &kml->slots[i];
1038        /* Discard slots that are empty or do not overlap the section */
1039        if (!mem->memory_size ||
1040            mem->start_addr > start + size - 1 ||
1041            start > mem->start_addr + mem->memory_size - 1) {
1042            continue;
1043        }
1044
1045        if (start >= mem->start_addr) {
1046            /* The slot starts before section or is aligned to it.  */
1047            offset = start - mem->start_addr;
1048            count = MIN(mem->memory_size - offset, size);
1049        } else {
1050            /* The slot starts after section.  */
1051            offset = 0;
1052            count = MIN(mem->memory_size, size - (mem->start_addr - start));
1053        }
1054        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
1055        if (ret < 0) {
1056            break;
1057        }
1058    }
1059
1060    kvm_slots_unlock();
1061
1062    return ret;
1063}
1064
1065static void kvm_coalesce_mmio_region(MemoryListener *listener,
1066                                     MemoryRegionSection *section,
1067                                     hwaddr start, hwaddr size)
1068{
1069    KVMState *s = kvm_state;
1070
1071    if (s->coalesced_mmio) {
1072        struct kvm_coalesced_mmio_zone zone;
1073
1074        zone.addr = start;
1075        zone.size = size;
1076        zone.pad = 0;
1077
1078        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1079    }
1080}
1081
1082static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
1083                                       MemoryRegionSection *section,
1084                                       hwaddr start, hwaddr size)
1085{
1086    KVMState *s = kvm_state;
1087
1088    if (s->coalesced_mmio) {
1089        struct kvm_coalesced_mmio_zone zone;
1090
1091        zone.addr = start;
1092        zone.size = size;
1093        zone.pad = 0;
1094
1095        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
1096    }
1097}
1098
1099static void kvm_coalesce_pio_add(MemoryListener *listener,
1100                                MemoryRegionSection *section,
1101                                hwaddr start, hwaddr size)
1102{
1103    KVMState *s = kvm_state;
1104
1105    if (s->coalesced_pio) {
1106        struct kvm_coalesced_mmio_zone zone;
1107
1108        zone.addr = start;
1109        zone.size = size;
1110        zone.pio = 1;
1111
1112        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1113    }
1114}
1115
1116static void kvm_coalesce_pio_del(MemoryListener *listener,
1117                                MemoryRegionSection *section,
1118                                hwaddr start, hwaddr size)
1119{
1120    KVMState *s = kvm_state;
1121
1122    if (s->coalesced_pio) {
1123        struct kvm_coalesced_mmio_zone zone;
1124
1125        zone.addr = start;
1126        zone.size = size;
1127        zone.pio = 1;
1128
1129        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
1130     }
1131}
1132
1133static MemoryListener kvm_coalesced_pio_listener = {
1134    .name = "kvm-coalesced-pio",
1135    .coalesced_io_add = kvm_coalesce_pio_add,
1136    .coalesced_io_del = kvm_coalesce_pio_del,
1137};
1138
1139int kvm_check_extension(KVMState *s, unsigned int extension)
1140{
1141    int ret;
1142
1143    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1144    if (ret < 0) {
1145        ret = 0;
1146    }
1147
1148    return ret;
1149}
1150
1151int kvm_vm_check_extension(KVMState *s, unsigned int extension)
1152{
1153    int ret;
1154
1155    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1156    if (ret < 0) {
1157        /* VM wide version not implemented, use global one instead */
1158        ret = kvm_check_extension(s, extension);
1159    }
1160
1161    return ret;
1162}
1163
1164typedef struct HWPoisonPage {
1165    ram_addr_t ram_addr;
1166    QLIST_ENTRY(HWPoisonPage) list;
1167} HWPoisonPage;
1168
1169static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
1170    QLIST_HEAD_INITIALIZER(hwpoison_page_list);
1171
1172static void kvm_unpoison_all(void *param)
1173{
1174    HWPoisonPage *page, *next_page;
1175
1176    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
1177        QLIST_REMOVE(page, list);
1178        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
1179        g_free(page);
1180    }
1181}
1182
1183void kvm_hwpoison_page_add(ram_addr_t ram_addr)
1184{
1185    HWPoisonPage *page;
1186
1187    QLIST_FOREACH(page, &hwpoison_page_list, list) {
1188        if (page->ram_addr == ram_addr) {
1189            return;
1190        }
1191    }
1192    page = g_new(HWPoisonPage, 1);
1193    page->ram_addr = ram_addr;
1194    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
1195}
1196
1197static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
1198{
1199#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
1200    /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
1201     * endianness, but the memory core hands them in target endianness.
1202     * For example, PPC is always treated as big-endian even if running
1203     * on KVM and on PPC64LE.  Correct here.
1204     */
1205    switch (size) {
1206    case 2:
1207        val = bswap16(val);
1208        break;
1209    case 4:
1210        val = bswap32(val);
1211        break;
1212    }
1213#endif
1214    return val;
1215}
1216
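    /*
     * Assign or deassign an eventfd for a guest-physical MMIO address via
     * KVM_IOEVENTFD, optionally matching on the written value (datamatch).
     */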
1217static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
1218                                  bool assign, uint32_t size, bool datamatch)
1219{
1220    int ret;
1221    struct kvm_ioeventfd iofd = {
1222        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1223        .addr = addr,
1224        .len = size,
1225        .flags = 0,
1226        .fd = fd,
1227    };
1228
1229    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
1230                                 datamatch);
1231    if (!kvm_enabled()) {
1232        return -ENOSYS;
1233    }
1234
1235    if (datamatch) {
1236        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1237    }
1238    if (!assign) {
1239        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1240    }
1241
1242    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
1243
1244    if (ret < 0) {
1245        return -errno;
1246    }
1247
1248    return 0;
1249}
1250
1251static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
1252                                 bool assign, uint32_t size, bool datamatch)
1253{
1254    struct kvm_ioeventfd kick = {
1255        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1256        .addr = addr,
1257        .flags = KVM_IOEVENTFD_FLAG_PIO,
1258        .len = size,
1259        .fd = fd,
1260    };
1261    int r;
1262    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
1263    if (!kvm_enabled()) {
1264        return -ENOSYS;
1265    }
1266    if (datamatch) {
1267        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1268    }
1269    if (!assign) {
1270        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1271    }
1272    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
1273    if (r < 0) {
1274        return r;
1275    }
1276    return 0;
1277}
1278
1279
1280static int kvm_check_many_ioeventfds(void)
1281{
1282    /* Userspace can use ioeventfd for io notification.  This requires a host
1283     * that supports eventfd(2) and an I/O thread; since eventfd does not
1284     * support SIGIO it cannot interrupt the vcpu.
1285     *
1286     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
1287     * can avoid creating too many ioeventfds.
1288     */
1289#if defined(CONFIG_EVENTFD)
1290    int ioeventfds[7];
1291    int i, ret = 0;
1292    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
1293        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
1294        if (ioeventfds[i] < 0) {
1295            break;
1296        }
1297        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
1298        if (ret < 0) {
1299            close(ioeventfds[i]);
1300            break;
1301        }
1302    }
1303
1304    /* Decide whether many devices are supported or not */
1305    ret = i == ARRAY_SIZE(ioeventfds);
1306
1307    while (i-- > 0) {
1308        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
1309        close(ioeventfds[i]);
1310    }
1311    return ret;
1312#else
1313    return 0;
1314#endif
1315}
1316
1317static const KVMCapabilityInfo *
1318kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
1319{
1320    while (list->name) {
1321        if (!kvm_check_extension(s, list->value)) {
1322            return list;
1323        }
1324        list++;
1325    }
1326    return NULL;
1327}
1328
1329void kvm_set_max_memslot_size(hwaddr max_slot_size)
1330{
1331    g_assert(
1332        ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
1333    );
1334    kvm_max_slot_size = max_slot_size;
1335}
1336
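    /*
     * Register or unregister the KVM memslot(s) backing a memory section.
     * Large sections are split into slots of at most kvm_max_slot_size
     * bytes; on removal, any pending dirty bits are synced first when
     * dirty logging is active.
     */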
1337static void kvm_set_phys_mem(KVMMemoryListener *kml,
1338                             MemoryRegionSection *section, bool add)
1339{
1340    KVMSlot *mem;
1341    int err;
1342    MemoryRegion *mr = section->mr;
1343    bool writeable = !mr->readonly && !mr->rom_device;
1344    hwaddr start_addr, size, slot_size, mr_offset;
1345    ram_addr_t ram_start_offset;
1346    void *ram;
1347
1348    if (!memory_region_is_ram(mr)) {
1349        if (writeable || !kvm_readonly_mem_allowed) {
1350            return;
1351        } else if (!mr->romd_mode) {
1352            /* If the memory device is not in romd_mode, then we actually want
1353             * to remove the kvm memory slot so all accesses will trap. */
1354            add = false;
1355        }
1356    }
1357
1358    size = kvm_align_section(section, &start_addr);
1359    if (!size) {
1360        return;
1361    }
1362
1363    /* The offset of the kvmslot within the memory region */
1364    mr_offset = section->offset_within_region + start_addr -
1365        section->offset_within_address_space;
1366
1367    /* use aligned delta to align the ram address and offset */
1368    ram = memory_region_get_ram_ptr(mr) + mr_offset;
1369    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;
1370
1371    kvm_slots_lock();
1372
1373    if (!add) {
1374        do {
1375            slot_size = MIN(kvm_max_slot_size, size);
1376            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
1377            if (!mem) {
1378                goto out;
1379            }
1380            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1381                /*
1382                 * NOTE: We should be aware that here we're only making a
1383                 * best effort to sync dirty bits.  Whether we're using the
1384                 * dirty log or the dirty ring, we ignore two facts:
1385                 *
1386                 * (1) dirty bits can reside in hardware buffers (PML)
1387                 *
1388                 * (2) after we collect dirty bits here, pages can be dirtied
1389                 * again before we do the final KVM_SET_USER_MEMORY_REGION to
1390                 * remove the slot.
1391                 *
1392                 * Not easy.  Let's keep our fingers crossed until it's fixed.
1393                 */
1394                if (kvm_state->kvm_dirty_ring_size) {
1395                    kvm_dirty_ring_reap_locked(kvm_state);
1396                } else {
1397                    kvm_slot_get_dirty_log(kvm_state, mem);
1398                }
1399                kvm_slot_sync_dirty_pages(mem);
1400            }
1401
1402            /* unregister the slot */
1403            g_free(mem->dirty_bmap);
1404            mem->dirty_bmap = NULL;
1405            mem->memory_size = 0;
1406            mem->flags = 0;
1407            err = kvm_set_user_memory_region(kml, mem, false);
1408            if (err) {
1409                fprintf(stderr, "%s: error unregistering slot: %s\n",
1410                        __func__, strerror(-err));
1411                abort();
1412            }
1413            start_addr += slot_size;
1414            size -= slot_size;
1415        } while (size);
1416        goto out;
1417    }
1418
1419    /* register the new slot */
1420    do {
1421        slot_size = MIN(kvm_max_slot_size, size);
1422        mem = kvm_alloc_slot(kml);
1423        mem->as_id = kml->as_id;
1424        mem->memory_size = slot_size;
1425        mem->start_addr = start_addr;
1426        mem->ram_start_offset = ram_start_offset;
1427        mem->ram = ram;
1428        mem->flags = kvm_mem_flags(mr);
1429        kvm_slot_init_dirty_bitmap(mem);
1430        err = kvm_set_user_memory_region(kml, mem, true);
1431        if (err) {
1432            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
1433                    strerror(-err));
1434            abort();
1435        }
1436        start_addr += slot_size;
1437        ram_start_offset += slot_size;
1438        ram += slot_size;
1439        size -= slot_size;
1440    } while (size);
1441
1442out:
1443    kvm_slots_unlock();
1444}
1445
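    /*
     * Background thread that periodically (currently once per second)
     * reaps dirty pages from all vcpu dirty rings into the KVMSlot dirty
     * bitmaps, taking the BQL around each reap.
     */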
1446static void *kvm_dirty_ring_reaper_thread(void *data)
1447{
1448    KVMState *s = data;
1449    struct KVMDirtyRingReaper *r = &s->reaper;
1450
1451    rcu_register_thread();
1452
1453    trace_kvm_dirty_ring_reaper("init");
1454
1455    while (true) {
1456        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
1457        trace_kvm_dirty_ring_reaper("wait");
1458        /*
1459         * TODO: provide a smarter timeout rather than a constant?
1460         */
1461        sleep(1);
1462
1463        trace_kvm_dirty_ring_reaper("wakeup");
1464        r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
1465
1466        qemu_mutex_lock_iothread();
1467        kvm_dirty_ring_reap(s);
1468        qemu_mutex_unlock_iothread();
1469
1470        r->reaper_iteration++;
1471    }
1472
1473    trace_kvm_dirty_ring_reaper("exit");
1474
1475    rcu_unregister_thread();
1476
1477    return NULL;
1478}
1479
1480static int kvm_dirty_ring_reaper_init(KVMState *s)
1481{
1482    struct KVMDirtyRingReaper *r = &s->reaper;
1483
1484    qemu_thread_create(&r->reaper_thr, "kvm-reaper",
1485                       kvm_dirty_ring_reaper_thread,
1486                       s, QEMU_THREAD_JOINABLE);
1487
1488    return 0;
1489}
1490
1491static void kvm_region_add(MemoryListener *listener,
1492                           MemoryRegionSection *section)
1493{
1494    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1495
1496    memory_region_ref(section->mr);
1497    kvm_set_phys_mem(kml, section, true);
1498}
1499
1500static void kvm_region_del(MemoryListener *listener,
1501                           MemoryRegionSection *section)
1502{
1503    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1504
1505    kvm_set_phys_mem(kml, section, false);
1506    memory_region_unref(section->mr);
1507}
1508
1509static void kvm_log_sync(MemoryListener *listener,
1510                         MemoryRegionSection *section)
1511{
1512    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1513
1514    kvm_slots_lock();
1515    kvm_physical_sync_dirty_bitmap(kml, section);
1516    kvm_slots_unlock();
1517}
1518
1519static void kvm_log_sync_global(MemoryListener *l)
1520{
1521    KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
1522    KVMState *s = kvm_state;
1523    KVMSlot *mem;
1524    int i;
1525
1526    /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
1527    kvm_dirty_ring_flush();
1528
1529    /*
1530     * TODO: make this faster when nr_slots is big while there are
1531     * only a few used slots (small VMs).
1532     */
1533    kvm_slots_lock();
1534    for (i = 0; i < s->nr_slots; i++) {
1535        mem = &kml->slots[i];
1536        if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1537            kvm_slot_sync_dirty_pages(mem);
1538            /*
1539             * This is not needed by KVM_GET_DIRTY_LOG because the
1540             * ioctl will unconditionally overwrite the whole region.
1541             * However, the kvm dirty ring has no such side effect.
1542             */
1543            kvm_slot_reset_dirty_pages(mem);
1544        }
1545    }
1546    kvm_slots_unlock();
1547}
1548
1549static void kvm_log_clear(MemoryListener *listener,
1550                          MemoryRegionSection *section)
1551{
1552    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1553    int r;
1554
1555    r = kvm_physical_log_clear(kml, section);
1556    if (r < 0) {
1557        error_report_once("%s: kvm log clear failed: mr=%s "
1558                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1559                          section->mr->name, section->offset_within_region,
1560                          int128_get64(section->size));
1561        abort();
1562    }
1563}
1564
1565static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1566                                  MemoryRegionSection *section,
1567                                  bool match_data, uint64_t data,
1568                                  EventNotifier *e)
1569{
1570    int fd = event_notifier_get_fd(e);
1571    int r;
1572
1573    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1574                               data, true, int128_get64(section->size),
1575                               match_data);
1576    if (r < 0) {
1577        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1578                __func__, strerror(-r), -r);
1579        abort();
1580    }
1581}
1582
1583static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1584                                  MemoryRegionSection *section,
1585                                  bool match_data, uint64_t data,
1586                                  EventNotifier *e)
1587{
1588    int fd = event_notifier_get_fd(e);
1589    int r;
1590
1591    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1592                               data, false, int128_get64(section->size),
1593                               match_data);
1594    if (r < 0) {
1595        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1596                __func__, strerror(-r), -r);
1597        abort();
1598    }
1599}
1600
1601static void kvm_io_ioeventfd_add(MemoryListener *listener,
1602                                 MemoryRegionSection *section,
1603                                 bool match_data, uint64_t data,
1604                                 EventNotifier *e)
1605{
1606    int fd = event_notifier_get_fd(e);
1607    int r;
1608
1609    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1610                              data, true, int128_get64(section->size),
1611                              match_data);
1612    if (r < 0) {
1613        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1614                __func__, strerror(-r), -r);
1615        abort();
1616    }
1617}
1618
1619static void kvm_io_ioeventfd_del(MemoryListener *listener,
1620                                 MemoryRegionSection *section,
1621                                 bool match_data, uint64_t data,
1622                                 EventNotifier *e)
1623
1624{
1625    int fd = event_notifier_get_fd(e);
1626    int r;
1627
1628    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1629                              data, false, int128_get64(section->size),
1630                              match_data);
1631    if (r < 0) {
1632        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1633                __func__, strerror(-r), -r);
1634        abort();
1635    }
1636}
1637
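    /*
     * Register a KVM memory listener for an address space.  With the dirty
     * ring enabled, dirty state is synced globally via log_sync_global;
     * otherwise the per-section log_sync/log_clear callbacks are used.
     */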
1638void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1639                                  AddressSpace *as, int as_id, const char *name)
1640{
1641    int i;
1642
1643    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
1644    kml->as_id = as_id;
1645
1646    for (i = 0; i < s->nr_slots; i++) {
1647        kml->slots[i].slot = i;
1648    }
1649
1650    kml->listener.region_add = kvm_region_add;
1651    kml->listener.region_del = kvm_region_del;
1652    kml->listener.log_start = kvm_log_start;
1653    kml->listener.log_stop = kvm_log_stop;
1654    kml->listener.priority = 10;
1655    kml->listener.name = name;
1656
1657    if (s->kvm_dirty_ring_size) {
1658        kml->listener.log_sync_global = kvm_log_sync_global;
1659    } else {
1660        kml->listener.log_sync = kvm_log_sync;
1661        kml->listener.log_clear = kvm_log_clear;
1662    }
1663
1664    memory_listener_register(&kml->listener, as);
1665
1666    for (i = 0; i < s->nr_as; ++i) {
1667        if (!s->as[i].as) {
1668            s->as[i].as = as;
1669            s->as[i].ml = kml;
1670            break;
1671        }
1672    }
1673}
1674
1675static MemoryListener kvm_io_listener = {
1676    .name = "kvm-io",
1677    .eventfd_add = kvm_io_ioeventfd_add,
1678    .eventfd_del = kvm_io_ioeventfd_del,
1679    .priority = 10,
1680};
1681
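    /*
     * Assert or deassert an in-kernel irqchip line.  Returns 1 when only
     * KVM_IRQ_LINE is available, or the status reported by
     * KVM_IRQ_LINE_STATUS otherwise.
     */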
1682int kvm_set_irq(KVMState *s, int irq, int level)
1683{
1684    struct kvm_irq_level event;
1685    int ret;
1686
1687    assert(kvm_async_interrupts_enabled());
1688
1689    event.level = level;
1690    event.irq = irq;
1691    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1692    if (ret < 0) {
1693        perror("kvm_set_irq");
1694        abort();
1695    }
1696
1697    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1698}
1699
1700#ifdef KVM_CAP_IRQ_ROUTING
1701typedef struct KVMMSIRoute {
1702    struct kvm_irq_routing_entry kroute;
1703    QTAILQ_ENTRY(KVMMSIRoute) entry;
1704} KVMMSIRoute;
1705
1706static void set_gsi(KVMState *s, unsigned int gsi)
1707{
1708    set_bit(gsi, s->used_gsi_bitmap);
1709}
1710
1711static void clear_gsi(KVMState *s, unsigned int gsi)
1712{
1713    clear_bit(gsi, s->used_gsi_bitmap);
1714}
1715
1716void kvm_init_irq_routing(KVMState *s)
1717{
1718    int gsi_count, i;
1719
1720    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1721    if (gsi_count > 0) {
1722        /* Round up so we can search ints using ffs */
1723        s->used_gsi_bitmap = bitmap_new(gsi_count);
1724        s->gsi_count = gsi_count;
1725    }
1726
1727    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1728    s->nr_allocated_irq_routes = 0;
1729
1730    if (!kvm_direct_msi_allowed) {
1731        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
1732            QTAILQ_INIT(&s->msi_hashtab[i]);
1733        }
1734    }
1735
1736    kvm_arch_init_irq_routing(s);
1737}
1738
1739void kvm_irqchip_commit_routes(KVMState *s)
1740{
1741    int ret;
1742
1743    if (kvm_gsi_direct_mapping()) {
1744        return;
1745    }
1746
1747    if (!kvm_gsi_routing_enabled()) {
1748        return;
1749    }
1750
1751    s->irq_routes->flags = 0;
1752    trace_kvm_irqchip_commit_routes();
1753    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1754    assert(ret == 0);
1755}
1756
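/* Append @entry to the cached routing table, doubling the allocation (with
 * a minimum of 64 entries) when it is full, and mark the GSI as used.  The
 * table is only pushed to the kernel by kvm_irqchip_commit_routes().
 */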
1757static void kvm_add_routing_entry(KVMState *s,
1758                                  struct kvm_irq_routing_entry *entry)
1759{
1760    struct kvm_irq_routing_entry *new;
1761    int n, size;
1762
1763    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1764        n = s->nr_allocated_irq_routes * 2;
1765        if (n < 64) {
1766            n = 64;
1767        }
1768        size = sizeof(struct kvm_irq_routing);
1769        size += n * sizeof(*new);
1770        s->irq_routes = g_realloc(s->irq_routes, size);
1771        s->nr_allocated_irq_routes = n;
1772    }
1773    n = s->irq_routes->nr++;
1774    new = &s->irq_routes->entries[n];
1775
1776    *new = *entry;
1777
1778    set_gsi(s, entry->gsi);
1779}
1780
1781static int kvm_update_routing_entry(KVMState *s,
1782                                    struct kvm_irq_routing_entry *new_entry)
1783{
1784    struct kvm_irq_routing_entry *entry;
1785    int n;
1786
1787    for (n = 0; n < s->irq_routes->nr; n++) {
1788        entry = &s->irq_routes->entries[n];
1789        if (entry->gsi != new_entry->gsi) {
1790            continue;
1791        }
1792
1793        if (!memcmp(entry, new_entry, sizeof *entry)) {
1794            return 0;
1795        }
1796
1797        *entry = *new_entry;
1798
1799        return 0;
1800    }
1801
1802    return -ESRCH;
1803}
1804
1805void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1806{
1807    struct kvm_irq_routing_entry e = {};
1808
1809    assert(pin < s->gsi_count);
1810
1811    e.gsi = irq;
1812    e.type = KVM_IRQ_ROUTING_IRQCHIP;
1813    e.flags = 0;
1814    e.u.irqchip.irqchip = irqchip;
1815    e.u.irqchip.pin = pin;
1816    kvm_add_routing_entry(s, &e);
1817}
1818
1819void kvm_irqchip_release_virq(KVMState *s, int virq)
1820{
1821    struct kvm_irq_routing_entry *e;
1822    int i;
1823
1824    if (kvm_gsi_direct_mapping()) {
1825        return;
1826    }
1827
1828    for (i = 0; i < s->irq_routes->nr; i++) {
1829        e = &s->irq_routes->entries[i];
1830        if (e->gsi == virq) {
1831            s->irq_routes->nr--;
1832            *e = s->irq_routes->entries[s->irq_routes->nr];
1833        }
1834    }
1835    clear_gsi(s, virq);
1836    kvm_arch_release_virq_post(virq);
1837    trace_kvm_irqchip_release_virq(virq);
1838}
1839
1840void kvm_irqchip_add_change_notifier(Notifier *n)
1841{
1842    notifier_list_add(&kvm_irqchip_change_notifiers, n);
1843}
1844
1845void kvm_irqchip_remove_change_notifier(Notifier *n)
1846{
1847    notifier_remove(n);
1848}
1849
1850void kvm_irqchip_change_notify(void)
1851{
1852    notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
1853}
1854
1855static unsigned int kvm_hash_msi(uint32_t data)
1856{
1857    /* This is optimized for IA32 MSI layout. However, no other arch shall
1858     * repeat the mistake of not providing a direct MSI injection API. */
1859    return data & 0xff;
1860}
1861
1862static void kvm_flush_dynamic_msi_routes(KVMState *s)
1863{
1864    KVMMSIRoute *route, *next;
1865    unsigned int hash;
1866
1867    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1868        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1869            kvm_irqchip_release_virq(s, route->kroute.gsi);
1870            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1871            g_free(route);
1872        }
1873    }
1874}
1875
1876static int kvm_irqchip_get_virq(KVMState *s)
1877{
1878    int next_virq;
1879
1880    /*
1881     * PIC and IOAPIC share the first 16 GSI numbers, so there are more
1882     * available GSI numbers than IRQ routing entries; allocating a GSI can
1883     * succeed even though a new route entry cannot be added. When this
1884     * happens, flush the dynamic MSI entries to free IRQ routing entries.
1885     */
1886    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
1887        kvm_flush_dynamic_msi_routes(s);
1888    }
1889
1890    /* Return the lowest unused GSI in the bitmap */
1891    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
1892    if (next_virq >= s->gsi_count) {
1893        return -ENOSPC;
1894    } else {
1895        return next_virq;
1896    }
1897}
1898
1899static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1900{
1901    unsigned int hash = kvm_hash_msi(msg.data);
1902    KVMMSIRoute *route;
1903
1904    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1905        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1906            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1907            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1908            return route;
1909        }
1910    }
1911    return NULL;
1912}
1913
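/* Deliver an MSI.  With KVM_CAP_SIGNAL_MSI the message is injected directly
 * via KVM_SIGNAL_MSI; otherwise a routing entry is allocated (and cached in
 * msi_hashtab) so the message can be raised as a regular GSI.
 */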
1914int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1915{
1916    struct kvm_msi msi;
1917    KVMMSIRoute *route;
1918
1919    if (kvm_direct_msi_allowed) {
1920        msi.address_lo = (uint32_t)msg.address;
1921        msi.address_hi = msg.address >> 32;
1922        msi.data = le32_to_cpu(msg.data);
1923        msi.flags = 0;
1924        memset(msi.pad, 0, sizeof(msi.pad));
1925
1926        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
1927    }
1928
1929    route = kvm_lookup_msi_route(s, msg);
1930    if (!route) {
1931        int virq;
1932
1933        virq = kvm_irqchip_get_virq(s);
1934        if (virq < 0) {
1935            return virq;
1936        }
1937
1938        route = g_malloc0(sizeof(KVMMSIRoute));
1939        route->kroute.gsi = virq;
1940        route->kroute.type = KVM_IRQ_ROUTING_MSI;
1941        route->kroute.flags = 0;
1942        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1943        route->kroute.u.msi.address_hi = msg.address >> 32;
1944        route->kroute.u.msi.data = le32_to_cpu(msg.data);
1945
1946        kvm_add_routing_entry(s, &route->kroute);
1947        kvm_irqchip_commit_routes(s);
1948
1949        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1950                           entry);
1951    }
1952
1953    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1954
1955    return kvm_set_irq(s, route->kroute.gsi, 1);
1956}
1957
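/* Allocate a virq and install an MSI routing entry for @vector of @dev (or,
 * with GSI direct mapping, simply translate the MSI data to a GSI).
 * Returns the virq/GSI, or a negative errno on failure.
 */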
1958int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1959{
1960    struct kvm_irq_routing_entry kroute = {};
1961    int virq;
1962    MSIMessage msg = {0, 0};
1963
1964    if (pci_available && dev) {
1965        msg = pci_get_msi_message(dev, vector);
1966    }
1967
1968    if (kvm_gsi_direct_mapping()) {
1969        return kvm_arch_msi_data_to_gsi(msg.data);
1970    }
1971
1972    if (!kvm_gsi_routing_enabled()) {
1973        return -ENOSYS;
1974    }
1975
1976    virq = kvm_irqchip_get_virq(s);
1977    if (virq < 0) {
1978        return virq;
1979    }
1980
1981    kroute.gsi = virq;
1982    kroute.type = KVM_IRQ_ROUTING_MSI;
1983    kroute.flags = 0;
1984    kroute.u.msi.address_lo = (uint32_t)msg.address;
1985    kroute.u.msi.address_hi = msg.address >> 32;
1986    kroute.u.msi.data = le32_to_cpu(msg.data);
1987    if (pci_available && kvm_msi_devid_required()) {
1988        kroute.flags = KVM_MSI_VALID_DEVID;
1989        kroute.u.msi.devid = pci_requester_id(dev);
1990    }
1991    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1992        kvm_irqchip_release_virq(s, virq);
1993        return -EINVAL;
1994    }
1995
1996    trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
1997                                    vector, virq);
1998
1999    kvm_add_routing_entry(s, &kroute);
2000    kvm_arch_add_msi_route_post(&kroute, vector, dev);
2001    kvm_irqchip_commit_routes(s);
2002
2003    return virq;
2004}
2005
2006int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
2007                                 PCIDevice *dev)
2008{
2009    struct kvm_irq_routing_entry kroute = {};
2010
2011    if (kvm_gsi_direct_mapping()) {
2012        return 0;
2013    }
2014
2015    if (!kvm_irqchip_in_kernel()) {
2016        return -ENOSYS;
2017    }
2018
2019    kroute.gsi = virq;
2020    kroute.type = KVM_IRQ_ROUTING_MSI;
2021    kroute.flags = 0;
2022    kroute.u.msi.address_lo = (uint32_t)msg.address;
2023    kroute.u.msi.address_hi = msg.address >> 32;
2024    kroute.u.msi.data = le32_to_cpu(msg.data);
2025    if (pci_available && kvm_msi_devid_required()) {
2026        kroute.flags = KVM_MSI_VALID_DEVID;
2027        kroute.u.msi.devid = pci_requester_id(dev);
2028    }
2029    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2030        return -EINVAL;
2031    }
2032
2033    trace_kvm_irqchip_update_msi_route(virq);
2034
2035    return kvm_update_routing_entry(s, &kroute);
2036}
2037
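/* Assign or deassign an irqfd for @virq.  With a split irqchip the resample
 * notifier is tracked in userspace (see below); otherwise KVM's in-kernel
 * resamplefd support is used.
 */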
2038static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2039                                    EventNotifier *resample, int virq,
2040                                    bool assign)
2041{
2042    int fd = event_notifier_get_fd(event);
2043    int rfd = resample ? event_notifier_get_fd(resample) : -1;
2044
2045    struct kvm_irqfd irqfd = {
2046        .fd = fd,
2047        .gsi = virq,
2048        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
2049    };
2050
2051    if (rfd != -1) {
2052        assert(assign);
2053        if (kvm_irqchip_is_split()) {
2054            /*
2055             * When the slow irqchip (e.g. IOAPIC) lives in userspace,
2056             * the kernel's resamplefd mechanism will not work, because
2057             * the EOI of the interrupt is delivered to userspace instead
2058             * and the in-kernel resamplefd kick is therefore skipped.
2059             * Userspace mimics what the kernel would provide by
2060             * remembering the resamplefd here and kicking it when we
2061             * receive the EOI of this IRQ.
2062             *
2063             * This is hackery, because the IOAPIC is mostly bypassed
2064             * (except for EOI broadcasts) when irqfd is used.  However
2065             * it brings much of the performance back for split irqchip
2066             * with INTx IRQs (for VFIO this gives 93% of the full fast
2067             * path performance, a 46% boost compared to the INTx slow
2068             * path).
2069             */
2070            kvm_resample_fd_insert(virq, resample);
2071        } else {
2072            irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
2073            irqfd.resamplefd = rfd;
2074        }
2075    } else if (!assign) {
2076        if (kvm_irqchip_is_split()) {
2077            kvm_resample_fd_remove(virq);
2078        }
2079    }
2080
2081    if (!kvm_irqfds_enabled()) {
2082        return -ENOSYS;
2083    }
2084
2085    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
2086}
2087
2088int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2089{
2090    struct kvm_irq_routing_entry kroute = {};
2091    int virq;
2092
2093    if (!kvm_gsi_routing_enabled()) {
2094        return -ENOSYS;
2095    }
2096
2097    virq = kvm_irqchip_get_virq(s);
2098    if (virq < 0) {
2099        return virq;
2100    }
2101
2102    kroute.gsi = virq;
2103    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
2104    kroute.flags = 0;
2105    kroute.u.adapter.summary_addr = adapter->summary_addr;
2106    kroute.u.adapter.ind_addr = adapter->ind_addr;
2107    kroute.u.adapter.summary_offset = adapter->summary_offset;
2108    kroute.u.adapter.ind_offset = adapter->ind_offset;
2109    kroute.u.adapter.adapter_id = adapter->adapter_id;
2110
2111    kvm_add_routing_entry(s, &kroute);
2112
2113    return virq;
2114}
2115
2116int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2117{
2118    struct kvm_irq_routing_entry kroute = {};
2119    int virq;
2120
2121    if (!kvm_gsi_routing_enabled()) {
2122        return -ENOSYS;
2123    }
2124    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
2125        return -ENOSYS;
2126    }
2127    virq = kvm_irqchip_get_virq(s);
2128    if (virq < 0) {
2129        return virq;
2130    }
2131
2132    kroute.gsi = virq;
2133    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
2134    kroute.flags = 0;
2135    kroute.u.hv_sint.vcpu = vcpu;
2136    kroute.u.hv_sint.sint = sint;
2137
2138    kvm_add_routing_entry(s, &kroute);
2139    kvm_irqchip_commit_routes(s);
2140
2141    return virq;
2142}
2143
2144#else /* !KVM_CAP_IRQ_ROUTING */
2145
2146void kvm_init_irq_routing(KVMState *s)
2147{
2148}
2149
2150void kvm_irqchip_release_virq(KVMState *s, int virq)
2151{
2152}
2153
2154int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2155{
2156    abort();
2157}
2158
2159int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
2160{
2161    return -ENOSYS;
2162}
2163
2164int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2165{
2166    return -ENOSYS;
2167}
2168
2169int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2170{
2171    return -ENOSYS;
2172}
2173
2174static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2175                                    EventNotifier *resample, int virq,
2176                                    bool assign)
2177{
2178    abort();
2179}
2180
2181int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev)
2182{
2183    return -ENOSYS;
2184}
2185#endif /* !KVM_CAP_IRQ_ROUTING */
2186
2187int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2188                                       EventNotifier *rn, int virq)
2189{
2190    return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
2191}
2192
2193int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2194                                          int virq)
2195{
2196    return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
2197}
2198
2199int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
2200                                   EventNotifier *rn, qemu_irq irq)
2201{
2202    gpointer key, gsi;
2203    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2204
2205    if (!found) {
2206        return -ENXIO;
2207    }
2208    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
2209}
2210
2211int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
2212                                      qemu_irq irq)
2213{
2214    gpointer key, gsi;
2215    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2216
2217    if (!found) {
2218        return -ENXIO;
2219    }
2220    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
2221}
2222
2223void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
2224{
2225    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
2226}
2227
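/* Create the in-kernel irqchip if the host supports one: try the
 * arch-specific hook first (which may create a split irqchip), fall back
 * to KVM_CREATE_IRQCHIP, then initialize GSI routing.
 */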
2228static void kvm_irqchip_create(KVMState *s)
2229{
2230    int ret;
2231
2232    assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
2233    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
2234        ;
2235    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
2236        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
2237        if (ret < 0) {
2238            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
2239            exit(1);
2240        }
2241    } else {
2242        return;
2243    }
2244
2245    /* First probe and see if there's an arch-specific hook to create the
2246     * in-kernel irqchip for us */
2247    ret = kvm_arch_irqchip_create(s);
2248    if (ret == 0) {
2249        if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
2250            error_report("Split IRQ chip mode not supported");
2251            exit(1);
2252        } else {
2253            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
2254        }
2255    }
2256    if (ret < 0) {
2257        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
2258        exit(1);
2259    }
2260
2261    kvm_kernel_irqchip = true;
2262    /* If we have an in-kernel IRQ chip then we must have asynchronous
2263     * interrupt delivery (though the reverse is not necessarily true)
2264     */
2265    kvm_async_interrupts_allowed = true;
2266    kvm_halt_in_kernel_allowed = true;
2267
2268    kvm_init_irq_routing(s);
2269
2270    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
2271}
2272
2273/* Find number of supported CPUs using the recommended
2274 * procedure from the kernel API documentation to cope with
2275 * older kernels that may be missing capabilities.
2276 */
2277static int kvm_recommended_vcpus(KVMState *s)
2278{
2279    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
2280    return (ret) ? ret : 4;
2281}
2282
2283static int kvm_max_vcpus(KVMState *s)
2284{
2285    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
2286    return (ret) ? ret : kvm_recommended_vcpus(s);
2287}
2288
2289static int kvm_max_vcpu_id(KVMState *s)
2290{
2291    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
2292    return (ret) ? ret : kvm_max_vcpus(s);
2293}
2294
2295bool kvm_vcpu_id_is_valid(int vcpu_id)
2296{
2297    KVMState *s = KVM_STATE(current_accel());
2298    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
2299}
2300
2301bool kvm_dirty_ring_enabled(void)
2302{
2303    return kvm_state->kvm_dirty_ring_size ? true : false;
2304}
2305
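/* Accelerator setup: open /dev/kvm, create the VM, probe and enable the
 * capabilities QEMU relies on (memory slots, dirty ring or dirty bitmap,
 * ioeventfd/irqfd, ...) and register the KVM memory listeners.
 */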
2306static int kvm_init(MachineState *ms)
2307{
2308    MachineClass *mc = MACHINE_GET_CLASS(ms);
2309    static const char upgrade_note[] =
2310        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2311        "(see http://sourceforge.net/projects/kvm).\n";
2312    struct {
2313        const char *name;
2314        int num;
2315    } num_cpus[] = {
2316        { "SMP",          ms->smp.cpus },
2317        { "hotpluggable", ms->smp.max_cpus },
2318        { NULL, }
2319    }, *nc = num_cpus;
2320    int soft_vcpus_limit, hard_vcpus_limit;
2321    KVMState *s;
2322    const KVMCapabilityInfo *missing_cap;
2323    int ret;
2324    int type = 0;
2325    uint64_t dirty_log_manual_caps;
2326
2327    qemu_mutex_init(&kml_slots_lock);
2328
2329    s = KVM_STATE(ms->accelerator);
2330
2331    /*
2332     * On systems where the kernel can support different base page
2333     * sizes, host page size may be different from TARGET_PAGE_SIZE,
2334     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
2335     * page size for the system though.
2336     */
2337    assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size);
2338
2339    s->sigmask_len = 8;
2340
2341#ifdef KVM_CAP_SET_GUEST_DEBUG
2342    QTAILQ_INIT(&s->kvm_sw_breakpoints);
2343#endif
2344    QLIST_INIT(&s->kvm_parked_vcpus);
2345    s->fd = qemu_open_old("/dev/kvm", O_RDWR);
2346    if (s->fd == -1) {
2347        fprintf(stderr, "Could not access KVM kernel module: %m\n");
2348        ret = -errno;
2349        goto err;
2350    }
2351
2352    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
2353    if (ret < KVM_API_VERSION) {
2354        if (ret >= 0) {
2355            ret = -EINVAL;
2356        }
2357        fprintf(stderr, "kvm version too old\n");
2358        goto err;
2359    }
2360
2361    if (ret > KVM_API_VERSION) {
2362        ret = -EINVAL;
2363        fprintf(stderr, "kvm version not supported\n");
2364        goto err;
2365    }
2366
2367    kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
2368    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2369
2370    /* If unspecified, use the default value */
2371    if (!s->nr_slots) {
2372        s->nr_slots = 32;
2373    }
2374
2375    s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
2376    if (s->nr_as <= 1) {
2377        s->nr_as = 1;
2378    }
2379    s->as = g_new0(struct KVMAs, s->nr_as);
2380
2381    if (object_property_find(OBJECT(current_machine), "kvm-type")) {
2382        g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
2383                                                            "kvm-type",
2384                                                            &error_abort);
2385        type = mc->kvm_type(ms, kvm_type);
2386    } else if (mc->kvm_type) {
2387        type = mc->kvm_type(ms, NULL);
2388    }
2389
2390    do {
2391        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
2392    } while (ret == -EINTR);
2393
2394    if (ret < 0) {
2395        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
2396                strerror(-ret));
2397
2398#ifdef TARGET_S390X
2399        if (ret == -EINVAL) {
2400            fprintf(stderr,
2401                    "Host kernel setup problem detected. Please verify:\n");
2402            fprintf(stderr, "- for kernels supporting the switch_amode or"
2403                    " user_mode parameters, whether\n");
2404            fprintf(stderr,
2405                    "  user space is running in primary address space\n");
2406            fprintf(stderr,
2407                    "- for kernels supporting the vm.allocate_pgste sysctl, "
2408                    "whether it is enabled\n");
2409        }
2410#elif defined(TARGET_PPC)
2411        if (ret == -EINVAL) {
2412            fprintf(stderr,
2413                    "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2414                    (type == 2) ? "pr" : "hv");
2415        }
2416#endif
2417        goto err;
2418    }
2419
2420    s->vmfd = ret;
2421
2422    /* check the vcpu limits */
2423    soft_vcpus_limit = kvm_recommended_vcpus(s);
2424    hard_vcpus_limit = kvm_max_vcpus(s);
2425
2426    while (nc->name) {
2427        if (nc->num > soft_vcpus_limit) {
2428            warn_report("Number of %s cpus requested (%d) exceeds "
2429                        "the recommended cpus supported by KVM (%d)",
2430                        nc->name, nc->num, soft_vcpus_limit);
2431
2432            if (nc->num > hard_vcpus_limit) {
2433                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
2434                        "the maximum cpus supported by KVM (%d)\n",
2435                        nc->name, nc->num, hard_vcpus_limit);
2436                exit(1);
2437            }
2438        }
2439        nc++;
2440    }
2441
2442    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
2443    if (!missing_cap) {
2444        missing_cap =
2445            kvm_check_extension_list(s, kvm_arch_required_capabilities);
2446    }
2447    if (missing_cap) {
2448        ret = -EINVAL;
2449        fprintf(stderr, "kvm does not support %s\n%s",
2450                missing_cap->name, upgrade_note);
2451        goto err;
2452    }
2453
2454    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
2455    s->coalesced_pio = s->coalesced_mmio &&
2456                       kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
2457
2458    /*
2459     * Enable KVM dirty ring if supported, otherwise fall back to
2460     * dirty logging mode
2461     */
2462    if (s->kvm_dirty_ring_size > 0) {
2463        uint64_t ring_bytes;
2464
2465        ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
2466
2467        /* Read the max supported pages */
2468        ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING);
2469        if (ret > 0) {
2470            if (ring_bytes > ret) {
2471                error_report("KVM dirty ring size %" PRIu32 " too big "
2472                             "(maximum is %ld).  Please use a smaller value.",
2473                             s->kvm_dirty_ring_size,
2474                             (long)(ret / sizeof(struct kvm_dirty_gfn)));
2475                ret = -EINVAL;
2476                goto err;
2477            }
2478
2479            ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
2480            if (ret) {
2481                error_report("Enabling of KVM dirty ring failed: %s. "
2482                             "Suggested minimum value is 1024.", strerror(-ret));
2483                goto err;
2484            }
2485
2486            s->kvm_dirty_ring_bytes = ring_bytes;
2487        } else {
2488            warn_report("KVM dirty ring not available, using bitmap method");
2489            s->kvm_dirty_ring_size = 0;
2490        }
2491    }
2492
2493    /*
2494     * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when the dirty ring is
2495     * enabled.  More importantly, KVM_DIRTY_LOG_INITIALLY_SET assumes that no
2496     * page is wr-protected initially, which conflicts with how the kvm dirty
2497     * ring is used - the dirty ring requires all pages to be wr-protected
2498     * from the very beginning.  Enabling it together with the dirty ring
2499     * causes data corruption.
2500     *
2501     * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
2502     * we may expect a higher stall time when starting migration.  In the
2503     * future, KVM_CLEAR_DIRTY_LOG could work with the dirty ring too: instead
2504     * of clearing the dirty bit, it could explicitly wr-protect guest pages.
2505     */
2506    if (!s->kvm_dirty_ring_size) {
2507        dirty_log_manual_caps =
2508            kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
2509        dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
2510                                  KVM_DIRTY_LOG_INITIALLY_SET);
2511        s->manual_dirty_log_protect = dirty_log_manual_caps;
2512        if (dirty_log_manual_caps) {
2513            ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
2514                                    dirty_log_manual_caps);
2515            if (ret) {
2516                warn_report("Failed to enable capability %"PRIu64" of "
2517                            "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. "
2518                            "Falling back to the legacy mode.",
2519                            dirty_log_manual_caps);
2520                s->manual_dirty_log_protect = 0;
2521            }
2522        }
2523    }
2524
2525#ifdef KVM_CAP_VCPU_EVENTS
2526    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2527#endif
2528
2529    s->robust_singlestep =
2530        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
2531
2532#ifdef KVM_CAP_DEBUGREGS
2533    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
2534#endif
2535
2536    s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2537
2538#ifdef KVM_CAP_IRQ_ROUTING
2539    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
2540#endif
2541
2542    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
2543
2544    s->irq_set_ioctl = KVM_IRQ_LINE;
2545    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2546        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2547    }
2548
2549    kvm_readonly_mem_allowed =
2550        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2551
2552    kvm_eventfds_allowed =
2553        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
2554
2555    kvm_irqfds_allowed =
2556        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
2557
2558    kvm_resamplefds_allowed =
2559        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2560
2561    kvm_vm_attributes_allowed =
2562        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2563
2564    kvm_ioeventfd_any_length_allowed =
2565        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
2566
2567    kvm_state = s;
2568
2569    ret = kvm_arch_init(ms, s);
2570    if (ret < 0) {
2571        goto err;
2572    }
2573
2574    if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
2575        s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2576    }
2577
2578    qemu_register_reset(kvm_unpoison_all, NULL);
2579
2580    if (s->kernel_irqchip_allowed) {
2581        kvm_irqchip_create(s);
2582    }
2583
2584    if (kvm_eventfds_allowed) {
2585        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2586        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2587    }
2588    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2589    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2590
2591    kvm_memory_listener_register(s, &s->memory_listener,
2592                                 &address_space_memory, 0, "kvm-memory");
2593    if (kvm_eventfds_allowed) {
2594        memory_listener_register(&kvm_io_listener,
2595                                 &address_space_io);
2596    }
2597    memory_listener_register(&kvm_coalesced_pio_listener,
2598                             &address_space_io);
2599
2600    s->many_ioeventfds = kvm_check_many_ioeventfds();
2601
2602    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2603    if (!s->sync_mmu) {
2604        ret = ram_block_discard_disable(true);
2605        assert(!ret);
2606    }
2607
2608    if (s->kvm_dirty_ring_size) {
2609        ret = kvm_dirty_ring_reaper_init(s);
2610        if (ret) {
2611            goto err;
2612        }
2613    }
2614
2615    return 0;
2616
2617err:
2618    assert(ret < 0);
2619    if (s->vmfd >= 0) {
2620        close(s->vmfd);
2621    }
2622    if (s->fd != -1) {
2623        close(s->fd);
2624    }
2625    g_free(s->memory_listener.slots);
2626
2627    return ret;
2628}
2629
2630void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2631{
2632    s->sigmask_len = sigmask_len;
2633}
2634
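/* Complete a KVM_EXIT_IO by replaying each port access against
 * address_space_io.
 */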
2635static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2636                          int size, uint32_t count)
2637{
2638    int i;
2639    uint8_t *ptr = data;
2640
2641    for (i = 0; i < count; i++) {
2642        address_space_rw(&address_space_io, port, attrs,
2643                         ptr, size,
2644                         direction == KVM_EXIT_IO_OUT);
2645        ptr += size;
2646    }
2647}
2648
2649static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2650{
2651    fprintf(stderr, "KVM internal error. Suberror: %d\n",
2652            run->internal.suberror);
2653
2654    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
2655        int i;
2656
2657        for (i = 0; i < run->internal.ndata; ++i) {
2658            fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
2659                    i, (uint64_t)run->internal.data[i]);
2660        }
2661    }
2662    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2663        fprintf(stderr, "emulation failure\n");
2664        if (!kvm_arch_stop_on_emulation_error(cpu)) {
2665            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2666            return EXCP_INTERRUPT;
2667        }
2668    }
2669    /* FIXME: Should trigger a qmp message to let management know
2670     * something went wrong.
2671     */
2672    return -1;
2673}
2674
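/* Drain the coalesced MMIO/PIO ring shared with the kernel, replaying each
 * buffered write into the corresponding address space.
 */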
2675void kvm_flush_coalesced_mmio_buffer(void)
2676{
2677    KVMState *s = kvm_state;
2678
2679    if (s->coalesced_flush_in_progress) {
2680        return;
2681    }
2682
2683    s->coalesced_flush_in_progress = true;
2684
2685    if (s->coalesced_mmio_ring) {
2686        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2687        while (ring->first != ring->last) {
2688            struct kvm_coalesced_mmio *ent;
2689
2690            ent = &ring->coalesced_mmio[ring->first];
2691
2692            if (ent->pio == 1) {
2693                address_space_write(&address_space_io, ent->phys_addr,
2694                                    MEMTXATTRS_UNSPECIFIED, ent->data,
2695                                    ent->len);
2696            } else {
2697                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2698            }
2699            smp_wmb();
2700            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2701        }
2702    }
2703
2704    s->coalesced_flush_in_progress = false;
2705}
2706
2707bool kvm_cpu_check_are_resettable(void)
2708{
2709    return kvm_arch_cpu_check_are_resettable();
2710}
2711
2712static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2713{
2714    if (!cpu->vcpu_dirty) {
2715        kvm_arch_get_registers(cpu);
2716        cpu->vcpu_dirty = true;
2717    }
2718}
2719
2720void kvm_cpu_synchronize_state(CPUState *cpu)
2721{
2722    if (!cpu->vcpu_dirty) {
2723        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2724    }
2725}
2726
2727static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2728{
2729    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
2730    cpu->vcpu_dirty = false;
2731}
2732
2733void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2734{
2735    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2736}
2737
2738static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2739{
2740    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
2741    cpu->vcpu_dirty = false;
2742}
2743
2744void kvm_cpu_synchronize_post_init(CPUState *cpu)
2745{
2746    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2747}
2748
2749static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2750{
2751    cpu->vcpu_dirty = true;
2752}
2753
2754void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2755{
2756    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2757}
2758
2759#ifdef KVM_HAVE_MCE_INJECTION
2760static __thread void *pending_sigbus_addr;
2761static __thread int pending_sigbus_code;
2762static __thread bool have_sigbus_pending;
2763#endif
2764
2765static void kvm_cpu_kick(CPUState *cpu)
2766{
2767    qatomic_set(&cpu->kvm_run->immediate_exit, 1);
2768}
2769
2770static void kvm_cpu_kick_self(void)
2771{
2772    if (kvm_immediate_exit) {
2773        kvm_cpu_kick(current_cpu);
2774    } else {
2775        qemu_cpu_kick_self();
2776    }
2777}
2778
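/* Consume any pending SIG_IPI kick; with KVM_CAP_IMMEDIATE_EXIT the kick is
 * acknowledged by clearing run->immediate_exit instead.
 */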
2779static void kvm_eat_signals(CPUState *cpu)
2780{
2781    struct timespec ts = { 0, 0 };
2782    siginfo_t siginfo;
2783    sigset_t waitset;
2784    sigset_t chkset;
2785    int r;
2786
2787    if (kvm_immediate_exit) {
2788        qatomic_set(&cpu->kvm_run->immediate_exit, 0);
2789        /* Write kvm_run->immediate_exit before the cpu->exit_request
2790         * write in kvm_cpu_exec.
2791         */
2792        smp_wmb();
2793        return;
2794    }
2795
2796    sigemptyset(&waitset);
2797    sigaddset(&waitset, SIG_IPI);
2798
2799    do {
2800        r = sigtimedwait(&waitset, &siginfo, &ts);
2801        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2802            perror("sigtimedwait");
2803            exit(1);
2804        }
2805
2806        r = sigpending(&chkset);
2807        if (r == -1) {
2808            perror("sigpending");
2809            exit(1);
2810        }
2811    } while (sigismember(&chkset, SIG_IPI));
2812}
2813
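/* Outer vcpu loop: flush dirty registers, enter KVM_RUN without the BQL and
 * dispatch each exit reason until an exit code (EXCP_* or a negative error)
 * is produced.
 */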
2814int kvm_cpu_exec(CPUState *cpu)
2815{
2816    struct kvm_run *run = cpu->kvm_run;
2817    int ret, run_ret;
2818
2819    DPRINTF("kvm_cpu_exec()\n");
2820
2821    if (kvm_arch_process_async_events(cpu)) {
2822        qatomic_set(&cpu->exit_request, 0);
2823        return EXCP_HLT;
2824    }
2825
2826    qemu_mutex_unlock_iothread();
2827    cpu_exec_start(cpu);
2828
2829    do {
2830        MemTxAttrs attrs;
2831
2832        if (cpu->vcpu_dirty) {
2833            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
2834            cpu->vcpu_dirty = false;
2835        }
2836
2837        kvm_arch_pre_run(cpu, run);
2838        if (qatomic_read(&cpu->exit_request)) {
2839            DPRINTF("interrupt exit requested\n");
2840            /*
2841             * KVM requires us to reenter the kernel after IO exits to complete
2842             * instruction emulation. This self-signal will ensure that we
2843             * leave ASAP again.
2844             */
2845            kvm_cpu_kick_self();
2846        }
2847
2848        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
2849         * Matching barrier in kvm_eat_signals.
2850         */
2851        smp_rmb();
2852
2853        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
2854
2855        attrs = kvm_arch_post_run(cpu, run);
2856
2857#ifdef KVM_HAVE_MCE_INJECTION
2858        if (unlikely(have_sigbus_pending)) {
2859            qemu_mutex_lock_iothread();
2860            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
2861                                    pending_sigbus_addr);
2862            have_sigbus_pending = false;
2863            qemu_mutex_unlock_iothread();
2864        }
2865#endif
2866
2867        if (run_ret < 0) {
2868            if (run_ret == -EINTR || run_ret == -EAGAIN) {
2869                DPRINTF("io window exit\n");
2870                kvm_eat_signals(cpu);
2871                ret = EXCP_INTERRUPT;
2872                break;
2873            }
2874            fprintf(stderr, "error: kvm run failed %s\n",
2875                    strerror(-run_ret));
2876#ifdef TARGET_PPC
2877            if (run_ret == -EBUSY) {
2878                fprintf(stderr,
2879                        "This is probably because your SMT is enabled.\n"
2880                        "VCPU can only run on primary threads with all "
2881                        "secondary threads offline.\n");
2882            }
2883#endif
2884            ret = -1;
2885            break;
2886        }
2887
2888        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
2889        switch (run->exit_reason) {
2890        case KVM_EXIT_IO:
2891            DPRINTF("handle_io\n");
2892            /* Called outside BQL */
2893            kvm_handle_io(run->io.port, attrs,
2894                          (uint8_t *)run + run->io.data_offset,
2895                          run->io.direction,
2896                          run->io.size,
2897                          run->io.count);
2898            ret = 0;
2899            break;
2900        case KVM_EXIT_MMIO:
2901            DPRINTF("handle_mmio\n");
2902            /* Called outside BQL */
2903            address_space_rw(&address_space_memory,
2904                             run->mmio.phys_addr, attrs,
2905                             run->mmio.data,
2906                             run->mmio.len,
2907                             run->mmio.is_write);
2908            ret = 0;
2909            break;
2910        case KVM_EXIT_IRQ_WINDOW_OPEN:
2911            DPRINTF("irq_window_open\n");
2912            ret = EXCP_INTERRUPT;
2913            break;
2914        case KVM_EXIT_SHUTDOWN:
2915            DPRINTF("shutdown\n");
2916            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2917            ret = EXCP_INTERRUPT;
2918            break;
2919        case KVM_EXIT_UNKNOWN:
2920            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
2921                    (uint64_t)run->hw.hardware_exit_reason);
2922            ret = -1;
2923            break;
2924        case KVM_EXIT_INTERNAL_ERROR:
2925            ret = kvm_handle_internal_error(cpu, run);
2926            break;
2927        case KVM_EXIT_DIRTY_RING_FULL:
2928            /*
2929             * We shouldn't continue if the dirty ring of this vcpu is
2930             * still full.  Got kicked by KVM_RESET_DIRTY_RINGS.
2931             */
2932            trace_kvm_dirty_ring_full(cpu->cpu_index);
2933            qemu_mutex_lock_iothread();
2934            kvm_dirty_ring_reap(kvm_state);
2935            qemu_mutex_unlock_iothread();
2936            ret = 0;
2937            break;
2938        case KVM_EXIT_SYSTEM_EVENT:
2939            switch (run->system_event.type) {
2940            case KVM_SYSTEM_EVENT_SHUTDOWN:
2941                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
2942                ret = EXCP_INTERRUPT;
2943                break;
2944            case KVM_SYSTEM_EVENT_RESET:
2945                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2946                ret = EXCP_INTERRUPT;
2947                break;
2948            case KVM_SYSTEM_EVENT_CRASH:
2949                kvm_cpu_synchronize_state(cpu);
2950                qemu_mutex_lock_iothread();
2951                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
2952                qemu_mutex_unlock_iothread();
2953                ret = 0;
2954                break;
2955            default:
2956                DPRINTF("kvm_arch_handle_exit\n");
2957                ret = kvm_arch_handle_exit(cpu, run);
2958                break;
2959            }
2960            break;
2961        default:
2962            DPRINTF("kvm_arch_handle_exit\n");
2963            ret = kvm_arch_handle_exit(cpu, run);
2964            break;
2965        }
2966    } while (ret == 0);
2967
2968    cpu_exec_end(cpu);
2969    qemu_mutex_lock_iothread();
2970
2971    if (ret < 0) {
2972        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2973        vm_stop(RUN_STATE_INTERNAL_ERROR);
2974    }
2975
2976    qatomic_set(&cpu->exit_request, 0);
2977    return ret;
2978}
2979
2980int kvm_ioctl(KVMState *s, int type, ...)
2981{
2982    int ret;
2983    void *arg;
2984    va_list ap;
2985
2986    va_start(ap, type);
2987    arg = va_arg(ap, void *);
2988    va_end(ap);
2989
2990    trace_kvm_ioctl(type, arg);
2991    ret = ioctl(s->fd, type, arg);
2992    if (ret == -1) {
2993        ret = -errno;
2994    }
2995    return ret;
2996}
2997
2998int kvm_vm_ioctl(KVMState *s, int type, ...)
2999{
3000    int ret;
3001    void *arg;
3002    va_list ap;
3003
3004    va_start(ap, type);
3005    arg = va_arg(ap, void *);
3006    va_end(ap);
3007
3008    trace_kvm_vm_ioctl(type, arg);
3009    ret = ioctl(s->vmfd, type, arg);
3010    if (ret == -1) {
3011        ret = -errno;
3012    }
3013    return ret;
3014}
3015
3016int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
3017{
3018    int ret;
3019    void *arg;
3020    va_list ap;
3021
3022    va_start(ap, type);
3023    arg = va_arg(ap, void *);
3024    va_end(ap);
3025
3026    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
3027    ret = ioctl(cpu->kvm_fd, type, arg);
3028    if (ret == -1) {
3029        ret = -errno;
3030    }
3031    return ret;
3032}
3033
3034int kvm_device_ioctl(int fd, int type, ...)
3035{
3036    int ret;
3037    void *arg;
3038    va_list ap;
3039
3040    va_start(ap, type);
3041    arg = va_arg(ap, void *);
3042    va_end(ap);
3043
3044    trace_kvm_device_ioctl(fd, type, arg);
3045    ret = ioctl(fd, type, arg);
3046    if (ret == -1) {
3047        ret = -errno;
3048    }
3049    return ret;
3050}
3051
3052int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
3053{
3054    int ret;
3055    struct kvm_device_attr attribute = {
3056        .group = group,
3057        .attr = attr,
3058    };
3059
3060    if (!kvm_vm_attributes_allowed) {
3061        return 0;
3062    }
3063
3064    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
3065    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
3066    return ret ? 0 : 1;
3067}
3068
3069int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
3070{
3071    struct kvm_device_attr attribute = {
3072        .group = group,
3073        .attr = attr,
3074        .flags = 0,
3075    };
3076
3077    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
3078}
3079
3080int kvm_device_access(int fd, int group, uint64_t attr,
3081                      void *val, bool write, Error **errp)
3082{
3083    struct kvm_device_attr kvmattr;
3084    int err;
3085
3086    kvmattr.flags = 0;
3087    kvmattr.group = group;
3088    kvmattr.attr = attr;
3089    kvmattr.addr = (uintptr_t)val;
3090
3091    err = kvm_device_ioctl(fd,
3092                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
3093                           &kvmattr);
3094    if (err < 0) {
3095        error_setg_errno(errp, -err,
3096                         "KVM_%s_DEVICE_ATTR failed: Group %d "
3097                         "attr 0x%016" PRIx64,
3098                         write ? "SET" : "GET", group, attr);
3099    }
3100    return err;
3101}
3102
3103bool kvm_has_sync_mmu(void)
3104{
3105    return kvm_state->sync_mmu;
3106}
3107
3108int kvm_has_vcpu_events(void)
3109{
3110    return kvm_state->vcpu_events;
3111}
3112
3113int kvm_has_robust_singlestep(void)
3114{
3115    return kvm_state->robust_singlestep;
3116}
3117
3118int kvm_has_debugregs(void)
3119{
3120    return kvm_state->debugregs;
3121}
3122
3123int kvm_max_nested_state_length(void)
3124{
3125    return kvm_state->max_nested_state_len;
3126}
3127
3128int kvm_has_many_ioeventfds(void)
3129{
3130    if (!kvm_enabled()) {
3131        return 0;
3132    }
3133    return kvm_state->many_ioeventfds;
3134}
3135
3136int kvm_has_gsi_routing(void)
3137{
3138#ifdef KVM_CAP_IRQ_ROUTING
3139    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
3140#else
3141    return false;
3142#endif
3143}
3144
3145int kvm_has_intx_set_mask(void)
3146{
3147    return kvm_state->intx_set_mask;
3148}
3149
3150bool kvm_arm_supports_user_irq(void)
3151{
3152    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
3153}
3154
3155#ifdef KVM_CAP_SET_GUEST_DEBUG
3156struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
3157                                                 target_ulong pc)
3158{
3159    struct kvm_sw_breakpoint *bp;
3160
3161    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
3162        if (bp->pc == pc) {
3163            return bp;
3164        }
3165    }
3166    return NULL;
3167}
3168
3169int kvm_sw_breakpoints_active(CPUState *cpu)
3170{
3171    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
3172}
3173
3174struct kvm_set_guest_debug_data {
3175    struct kvm_guest_debug dbg;
3176    int err;
3177};
3178
3179static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
3180{
3181    struct kvm_set_guest_debug_data *dbg_data =
3182        (struct kvm_set_guest_debug_data *) data.host_ptr;
3183
3184    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
3185                                   &dbg_data->dbg);
3186}
3187
3188int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3189{
3190    struct kvm_set_guest_debug_data data;
3191
3192    data.dbg.control = reinject_trap;
3193
3194    if (cpu->singlestep_enabled) {
3195        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
3196    }
3197    kvm_arch_update_guest_debug(cpu, &data.dbg);
3198
3199    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
3200               RUN_ON_CPU_HOST_PTR(&data));
3201    return data.err;
3202}
3203
3204int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
3205                          target_ulong len, int type)
3206{
3207    struct kvm_sw_breakpoint *bp;
3208    int err;
3209
3210    if (type == GDB_BREAKPOINT_SW) {
3211        bp = kvm_find_sw_breakpoint(cpu, addr);
3212        if (bp) {
3213            bp->use_count++;
3214            return 0;
3215        }
3216
3217        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
3218        bp->pc = addr;
3219        bp->use_count = 1;
3220        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
3221        if (err) {
3222            g_free(bp);
3223            return err;
3224        }
3225
3226        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3227    } else {
3228        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
3229        if (err) {
3230            return err;
3231        }
3232    }
3233
3234    CPU_FOREACH(cpu) {
3235        err = kvm_update_guest_debug(cpu, 0);
3236        if (err) {
3237            return err;
3238        }
3239    }
3240    return 0;
3241}
3242
3243int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
3244                          target_ulong len, int type)
3245{
3246    struct kvm_sw_breakpoint *bp;
3247    int err;
3248
3249    if (type == GDB_BREAKPOINT_SW) {
3250        bp = kvm_find_sw_breakpoint(cpu, addr);
3251        if (!bp) {
3252            return -ENOENT;
3253        }
3254
3255        if (bp->use_count > 1) {
3256            bp->use_count--;
3257            return 0;
3258        }
3259
3260        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
3261        if (err) {
3262            return err;
3263        }
3264
3265        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3266        g_free(bp);
3267    } else {
3268        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
3269        if (err) {
3270            return err;
3271        }
3272    }
3273
3274    CPU_FOREACH(cpu) {
3275        err = kvm_update_guest_debug(cpu, 0);
3276        if (err) {
3277            return err;
3278        }
3279    }
3280    return 0;
3281}
3282
3283void kvm_remove_all_breakpoints(CPUState *cpu)
3284{
3285    struct kvm_sw_breakpoint *bp, *next;
3286    KVMState *s = cpu->kvm_state;
3287    CPUState *tmpcpu;
3288
3289    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
3290        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
3291            /* Try harder to find a CPU that currently sees the breakpoint. */
3292            CPU_FOREACH(tmpcpu) {
3293                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
3294                    break;
3295                }
3296            }
3297        }
3298        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
3299        g_free(bp);
3300    }
3301    kvm_arch_remove_all_hw_breakpoints();
3302
3303    CPU_FOREACH(cpu) {
3304        kvm_update_guest_debug(cpu, 0);
3305    }
3306}
3307
3308#else /* !KVM_CAP_SET_GUEST_DEBUG */
3309
3310int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3311{
3312    return -EINVAL;
3313}
3314
3315int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
3316                          target_ulong len, int type)
3317{
3318    return -EINVAL;
3319}
3320
3321int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
3322                          target_ulong len, int type)
3323{
3324    return -EINVAL;
3325}
3326
3327void kvm_remove_all_breakpoints(CPUState *cpu)
3328{
3329}
3330#endif /* !KVM_CAP_SET_GUEST_DEBUG */
3331
3332static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
3333{
3334    KVMState *s = kvm_state;
3335    struct kvm_signal_mask *sigmask;
3336    int r;
3337
3338    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
3339
3340    sigmask->len = s->sigmask_len;
3341    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
3342    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
3343    g_free(sigmask);
3344
3345    return r;
3346}
3347
3348static void kvm_ipi_signal(int sig)
3349{
3350    if (current_cpu) {
3351        assert(kvm_immediate_exit);
3352        kvm_cpu_kick(current_cpu);
3353    }
3354}
3355
3356void kvm_init_cpu_signals(CPUState *cpu)
3357{
3358    int r;
3359    sigset_t set;
3360    struct sigaction sigact;
3361
3362    memset(&sigact, 0, sizeof(sigact));
3363    sigact.sa_handler = kvm_ipi_signal;
3364    sigaction(SIG_IPI, &sigact, NULL);
3365
3366    pthread_sigmask(SIG_BLOCK, NULL, &set);
3367#if defined KVM_HAVE_MCE_INJECTION
3368    sigdelset(&set, SIGBUS);
3369    pthread_sigmask(SIG_SETMASK, &set, NULL);
3370#endif
3371    sigdelset(&set, SIG_IPI);
3372    if (kvm_immediate_exit) {
3373        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
3374    } else {
3375        r = kvm_set_signal_mask(cpu, &set);
3376    }
3377    if (r) {
3378        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
3379        exit(1);
3380    }
3381}
3382
3383/* Called asynchronously in VCPU thread.  */
3384int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
3385{
3386#ifdef KVM_HAVE_MCE_INJECTION
3387    if (have_sigbus_pending) {
3388        return 1;
3389    }
3390    have_sigbus_pending = true;
3391    pending_sigbus_addr = addr;
3392    pending_sigbus_code = code;
3393    qatomic_set(&cpu->exit_request, 1);
3394    return 0;
3395#else
3396    return 1;
3397#endif
3398}
3399
3400/* Called synchronously (via signalfd) in main thread.  */
3401int kvm_on_sigbus(int code, void *addr)
3402{
3403#ifdef KVM_HAVE_MCE_INJECTION
3404    /* Action required MCE kills the process if SIGBUS is blocked.  Because
3405     * that's what happens in the I/O thread, where we handle MCE via signalfd,
3406     * we can only get action optional here.
3407     */
3408    assert(code != BUS_MCEERR_AR);
3409    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
3410    return 0;
3411#else
3412    return 1;
3413#endif
3414}
3415
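/* Create an in-kernel device, or merely probe for support when @test is
 * true.  Returns the new device fd, 0 for a successful test, or a negative
 * errno.
 */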
3416int kvm_create_device(KVMState *s, uint64_t type, bool test)
3417{
3418    int ret;
3419    struct kvm_create_device create_dev;
3420
3421    create_dev.type = type;
3422    create_dev.fd = -1;
3423    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
3424
3425    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
3426        return -ENOTSUP;
3427    }
3428
3429    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
3430    if (ret) {
3431        return ret;
3432    }
3433
3434    return test ? 0 : create_dev.fd;
3435}
3436
3437bool kvm_device_supported(int vmfd, uint64_t type)
3438{
3439    struct kvm_create_device create_dev = {
3440        .type = type,
3441        .fd = -1,
3442        .flags = KVM_CREATE_DEVICE_TEST,
3443    };
3444
3445    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
3446        return false;
3447    }
3448
3449    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
3450}
3451
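/* Write a single vcpu register via KVM_SET_ONE_REG; a failure is traced but
 * left for the caller to handle.
 */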
3452int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
3453{
3454    struct kvm_one_reg reg;
3455    int r;
3456
3457    reg.id = id;
3458    reg.addr = (uintptr_t) source;
3459    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
3460    if (r) {
3461        trace_kvm_failed_reg_set(id, strerror(-r));
3462    }
3463    return r;
3464}
3465
3466int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
3467{
3468    struct kvm_one_reg reg;
3469    int r;
3470
3471    reg.id = id;
3472    reg.addr = (uintptr_t) target;
3473    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
3474    if (r) {
3475        trace_kvm_failed_reg_get(id, strerror(-r));
3476    }
3477    return r;
3478}
3479
3480static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
3481                                 hwaddr start_addr, hwaddr size)
3482{
3483    KVMState *kvm = KVM_STATE(ms->accelerator);
3484    int i;
3485
3486    for (i = 0; i < kvm->nr_as; ++i) {
3487        if (kvm->as[i].as == as && kvm->as[i].ml) {
3488            size = MIN(kvm_max_slot_size, size);
3489            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
3490                                                    start_addr, size);
3491        }
3492    }
3493
3494    return false;
3495}
3496
3497static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
3498                                   const char *name, void *opaque,
3499                                   Error **errp)
3500{
3501    KVMState *s = KVM_STATE(obj);
3502    int64_t value = s->kvm_shadow_mem;
3503
3504    visit_type_int(v, name, &value, errp);
3505}
3506
3507static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
3508                                   const char *name, void *opaque,
3509                                   Error **errp)
3510{
3511    KVMState *s = KVM_STATE(obj);
3512    int64_t value;
3513
3514    if (s->fd != -1) {
3515        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3516        return;
3517    }
3518
3519    if (!visit_type_int(v, name, &value, errp)) {
3520        return;
3521    }
3522
3523    s->kvm_shadow_mem = value;
3524}
3525
3526static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
3527                                   const char *name, void *opaque,
3528                                   Error **errp)
3529{
3530    KVMState *s = KVM_STATE(obj);
3531    OnOffSplit mode;
3532
3533    if (s->fd != -1) {
3534        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3535        return;
3536    }
3537
3538    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
3539        return;
3540    }
3541    switch (mode) {
3542    case ON_OFF_SPLIT_ON:
3543        s->kernel_irqchip_allowed = true;
3544        s->kernel_irqchip_required = true;
3545        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3546        break;
3547    case ON_OFF_SPLIT_OFF:
3548        s->kernel_irqchip_allowed = false;
3549        s->kernel_irqchip_required = false;
3550        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3551        break;
3552    case ON_OFF_SPLIT_SPLIT:
3553        s->kernel_irqchip_allowed = true;
3554        s->kernel_irqchip_required = true;
3555        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
3556        break;
3557    default:
3558        /* The value was checked in visit_type_OnOffSplit() above. If
3559         * we get here, then something is wrong in QEMU.
3560         */
3561        abort();
3562    }
3563}
3564
3565bool kvm_kernel_irqchip_allowed(void)
3566{
3567    return kvm_state->kernel_irqchip_allowed;
3568}
3569
3570bool kvm_kernel_irqchip_required(void)
3571{
3572    return kvm_state->kernel_irqchip_required;
3573}
3574
3575bool kvm_kernel_irqchip_split(void)
3576{
3577    return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
3578}
3579
3580static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
3581                                    const char *name, void *opaque,
3582                                    Error **errp)
3583{
3584    KVMState *s = KVM_STATE(obj);
3585    uint32_t value = s->kvm_dirty_ring_size;
3586
3587    visit_type_uint32(v, name, &value, errp);
3588}
3589
3590static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
3591                                    const char *name, void *opaque,
3592                                    Error **errp)
3593{
3594    KVMState *s = KVM_STATE(obj);
3595    Error *error = NULL;
3596    uint32_t value;
3597
3598    if (s->fd != -1) {
3599        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3600        return;
3601    }
3602
3603    visit_type_uint32(v, name, &value, &error);
3604    if (error) {
3605        error_propagate(errp, error);
3606        return;
3607    }
3608    if (value & (value - 1)) {
3609        error_setg(errp, "dirty-ring-size must be a power of two.");
3610        return;
3611    }
3612
3613    s->kvm_dirty_ring_size = value;
3614}
3615
3616static void kvm_accel_instance_init(Object *obj)
3617{
3618    KVMState *s = KVM_STATE(obj);
3619
3620    s->fd = -1;
3621    s->vmfd = -1;
3622    s->kvm_shadow_mem = -1;
3623    s->kernel_irqchip_allowed = true;
3624    s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
3625    /* KVM dirty ring is by default off */
3626    s->kvm_dirty_ring_size = 0;
3627}
3628
3629static void kvm_accel_class_init(ObjectClass *oc, void *data)
3630{
3631    AccelClass *ac = ACCEL_CLASS(oc);
3632    ac->name = "KVM";
3633    ac->init_machine = kvm_init;
3634    ac->has_memory = kvm_accel_has_memory;
3635    ac->allowed = &kvm_allowed;
3636
3637    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
3638        NULL, kvm_set_kernel_irqchip,
3639        NULL, NULL);
3640    object_class_property_set_description(oc, "kernel-irqchip",
3641        "Configure KVM in-kernel irqchip");
3642
3643    object_class_property_add(oc, "kvm-shadow-mem", "int",
3644        kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
3645        NULL, NULL);
3646    object_class_property_set_description(oc, "kvm-shadow-mem",
3647        "KVM shadow MMU size");
3648
3649    object_class_property_add(oc, "dirty-ring-size", "uint32",
3650        kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
3651        NULL, NULL);
3652    object_class_property_set_description(oc, "dirty-ring-size",
3653        "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
3654}
3655
3656static const TypeInfo kvm_accel_type = {
3657    .name = TYPE_KVM_ACCEL,
3658    .parent = TYPE_ACCEL,
3659    .instance_init = kvm_accel_instance_init,
3660    .class_init = kvm_accel_class_init,
3661    .instance_size = sizeof(KVMState),
3662};
3663
3664static void kvm_type_init(void)
3665{
3666    type_register_static(&kvm_accel_type);
3667}
3668
3669type_init(kvm_type_init);
3670