qemu/accel/kvm/kvm-all.c
   1/*
   2 * QEMU KVM support
   3 *
   4 * Copyright IBM, Corp. 2008
   5 *           Red Hat, Inc. 2008
   6 *
   7 * Authors:
   8 *  Anthony Liguori   <aliguori@us.ibm.com>
   9 *  Glauber Costa     <gcosta@redhat.com>
  10 *
  11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  12 * See the COPYING file in the top-level directory.
  13 *
  14 */
  15
  16#include "qemu/osdep.h"
  17#include <sys/ioctl.h>
  18#include <poll.h>
  19
  20#include <linux/kvm.h>
  21
  22#include "qemu/atomic.h"
  23#include "qemu/option.h"
  24#include "qemu/config-file.h"
  25#include "qemu/error-report.h"
  26#include "qapi/error.h"
  27#include "hw/pci/msi.h"
  28#include "hw/pci/msix.h"
  29#include "hw/s390x/adapter.h"
  30#include "exec/gdbstub.h"
  31#include "sysemu/kvm_int.h"
  32#include "sysemu/runstate.h"
  33#include "sysemu/cpus.h"
  34#include "qemu/bswap.h"
  35#include "exec/memory.h"
  36#include "exec/ram_addr.h"
  37#include "qemu/event_notifier.h"
  38#include "qemu/main-loop.h"
  39#include "trace.h"
  40#include "hw/irq.h"
  41#include "qapi/visitor.h"
  42#include "qapi/qapi-types-common.h"
  43#include "qapi/qapi-visit-common.h"
  44#include "sysemu/reset.h"
  45#include "qemu/guest-random.h"
  46#include "sysemu/hw_accel.h"
  47#include "kvm-cpus.h"
  48#include "sysemu/dirtylimit.h"
  49
  50#include "hw/boards.h"
  51#include "monitor/stats.h"
  52
  53/* This check must be after config-host.h is included */
  54#ifdef CONFIG_EVENTFD
  55#include <sys/eventfd.h>
  56#endif
  57
  58/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
  59 * need to use the real host PAGE_SIZE, as that's what KVM will use.
  60 */
  61#ifdef PAGE_SIZE
  62#undef PAGE_SIZE
  63#endif
  64#define PAGE_SIZE qemu_real_host_page_size()
  65
  66#ifndef KVM_GUESTDBG_BLOCKIRQ
  67#define KVM_GUESTDBG_BLOCKIRQ 0
  68#endif
  69
  70//#define DEBUG_KVM
  71
  72#ifdef DEBUG_KVM
  73#define DPRINTF(fmt, ...) \
  74    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
  75#else
  76#define DPRINTF(fmt, ...) \
  77    do { } while (0)
  78#endif
  79
  80#define KVM_MSI_HASHTAB_SIZE    256
  81
  82struct KVMParkedVcpu {
  83    unsigned long vcpu_id;
  84    int kvm_fd;
  85    QLIST_ENTRY(KVMParkedVcpu) node;
  86};
  87
  88enum KVMDirtyRingReaperState {
  89    KVM_DIRTY_RING_REAPER_NONE = 0,
  90    /* The reaper is sleeping */
  91    KVM_DIRTY_RING_REAPER_WAIT,
  92    /* The reaper is reaping for dirty pages */
  93    KVM_DIRTY_RING_REAPER_REAPING,
  94};
  95
  96/*
  97 * KVM reaper instance, responsible for collecting the KVM dirty bits
  98 * via the dirty ring.
  99 */
 100struct KVMDirtyRingReaper {
 101    /* The reaper thread */
 102    QemuThread reaper_thr;
 103    volatile uint64_t reaper_iteration; /* iteration number of reaper thr */
 104    volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */
 105};
 106
 107struct KVMState
 108{
 109    AccelState parent_obj;
 110
 111    int nr_slots;
 112    int fd;
 113    int vmfd;
 114    int coalesced_mmio;
 115    int coalesced_pio;
 116    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
 117    bool coalesced_flush_in_progress;
 118    int vcpu_events;
 119    int robust_singlestep;
 120    int debugregs;
 121#ifdef KVM_CAP_SET_GUEST_DEBUG
 122    QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
 123#endif
 124    int max_nested_state_len;
 125    int many_ioeventfds;
 126    int intx_set_mask;
 127    int kvm_shadow_mem;
 128    bool kernel_irqchip_allowed;
 129    bool kernel_irqchip_required;
 130    OnOffAuto kernel_irqchip_split;
 131    bool sync_mmu;
 132    uint64_t manual_dirty_log_protect;
 133    /* The man page (and posix) say ioctl numbers are signed int, but
 134     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
 135     * unsigned, and treating them as signed here can break things */
 136    unsigned irq_set_ioctl;
 137    unsigned int sigmask_len;
 138    GHashTable *gsimap;
 139#ifdef KVM_CAP_IRQ_ROUTING
 140    struct kvm_irq_routing *irq_routes;
 141    int nr_allocated_irq_routes;
 142    unsigned long *used_gsi_bitmap;
 143    unsigned int gsi_count;
 144    QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
 145#endif
 146    KVMMemoryListener memory_listener;
 147    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
 148
 149    /* For "info mtree -f" to tell if an MR is registered in KVM */
 150    int nr_as;
 151    struct KVMAs {
 152        KVMMemoryListener *ml;
 153        AddressSpace *as;
 154    } *as;
 155    uint64_t kvm_dirty_ring_bytes;  /* Size of the per-vcpu dirty ring */
 156    uint32_t kvm_dirty_ring_size;   /* Number of dirty GFNs per ring */
 157    struct KVMDirtyRingReaper reaper;
 158};
 159
 160KVMState *kvm_state;
 161bool kvm_kernel_irqchip;
 162bool kvm_split_irqchip;
 163bool kvm_async_interrupts_allowed;
 164bool kvm_halt_in_kernel_allowed;
 165bool kvm_eventfds_allowed;
 166bool kvm_irqfds_allowed;
 167bool kvm_resamplefds_allowed;
 168bool kvm_msi_via_irqfd_allowed;
 169bool kvm_gsi_routing_allowed;
 170bool kvm_gsi_direct_mapping;
 171bool kvm_allowed;
 172bool kvm_readonly_mem_allowed;
 173bool kvm_vm_attributes_allowed;
 174bool kvm_direct_msi_allowed;
 175bool kvm_ioeventfd_any_length_allowed;
 176bool kvm_msi_use_devid;
 177bool kvm_has_guest_debug;
 178int kvm_sstep_flags;
 179static bool kvm_immediate_exit;
 180static hwaddr kvm_max_slot_size = ~0;
 181
 182static const KVMCapabilityInfo kvm_required_capabilites[] = {
 183    KVM_CAP_INFO(USER_MEMORY),
 184    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
 185    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
 186    KVM_CAP_LAST_INFO
 187};
 188
 189static NotifierList kvm_irqchip_change_notifiers =
 190    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
 191
 192struct KVMResampleFd {
 193    int gsi;
 194    EventNotifier *resample_event;
 195    QLIST_ENTRY(KVMResampleFd) node;
 196};
 197typedef struct KVMResampleFd KVMResampleFd;
 198
 199/*
 200 * Only used with split irqchip where we need to do the resample fd
 201 * kick for the kernel from userspace.
 202 */
 203static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
 204    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
 205
 206static QemuMutex kml_slots_lock;
 207
 208#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
 209#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)
 210
 211static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
 212
 213static inline void kvm_resample_fd_remove(int gsi)
 214{
 215    KVMResampleFd *rfd;
 216
 217    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
 218        if (rfd->gsi == gsi) {
 219            QLIST_REMOVE(rfd, node);
 220            g_free(rfd);
 221            break;
 222        }
 223    }
 224}
 225
 226static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
 227{
 228    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);
 229
 230    rfd->gsi = gsi;
 231    rfd->resample_event = event;
 232
 233    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
 234}
 235
 236void kvm_resample_fd_notify(int gsi)
 237{
 238    KVMResampleFd *rfd;
 239
 240    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
 241        if (rfd->gsi == gsi) {
 242            event_notifier_set(rfd->resample_event);
 243            trace_kvm_resample_fd_notify(gsi);
 244            return;
 245        }
 246    }
 247}
 248
 249int kvm_get_max_memslots(void)
 250{
 251    KVMState *s = KVM_STATE(current_accel());
 252
 253    return s->nr_slots;
 254}
 255
 256/* Called with KVMMemoryListener.slots_lock held */
 257static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 258{
 259    KVMState *s = kvm_state;
 260    int i;
 261
 262    for (i = 0; i < s->nr_slots; i++) {
 263        if (kml->slots[i].memory_size == 0) {
 264            return &kml->slots[i];
 265        }
 266    }
 267
 268    return NULL;
 269}
 270
 271bool kvm_has_free_slot(MachineState *ms)
 272{
 273    KVMState *s = KVM_STATE(ms->accelerator);
 274    bool result;
 275    KVMMemoryListener *kml = &s->memory_listener;
 276
 277    kvm_slots_lock();
 278    result = !!kvm_get_free_slot(kml);
 279    kvm_slots_unlock();
 280
 281    return result;
 282}
 283
 284/* Called with KVMMemoryListener.slots_lock held */
 285static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
 286{
 287    KVMSlot *slot = kvm_get_free_slot(kml);
 288
 289    if (slot) {
 290        return slot;
 291    }
 292
 293    fprintf(stderr, "%s: no free slot available\n", __func__);
 294    abort();
 295}
 296
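/*
 * Find the slot that exactly matches [start_addr, start_addr + size);
 * partially overlapping slots are not returned.  Called with the slots
 * lock held.
 */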
 297static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
 298                                         hwaddr start_addr,
 299                                         hwaddr size)
 300{
 301    KVMState *s = kvm_state;
 302    int i;
 303
 304    for (i = 0; i < s->nr_slots; i++) {
 305        KVMSlot *mem = &kml->slots[i];
 306
 307        if (start_addr == mem->start_addr && size == mem->memory_size) {
 308            return mem;
 309        }
 310    }
 311
 312    return NULL;
 313}
 314
 315/*
 316 * Calculate and align the start address and the size of the section.
 317 * Return the size. If the size is 0, the aligned section is empty.
 318 */
 319static hwaddr kvm_align_section(MemoryRegionSection *section,
 320                                hwaddr *start)
 321{
 322    hwaddr size = int128_get64(section->size);
 323    hwaddr delta, aligned;
 324
  325    /* KVM works in page-size chunks, but the function may be called
  326       with a sub-page size and an unaligned start address. Round the
  327       start address up and truncate the size down to page boundaries. */
 328    aligned = ROUND_UP(section->offset_within_address_space,
 329                       qemu_real_host_page_size());
 330    delta = aligned - section->offset_within_address_space;
 331    *start = aligned;
 332    if (delta > size) {
 333        return 0;
 334    }
 335
 336    return (size - delta) & qemu_real_host_page_mask();
 337}
 338
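/*
 * Translate a host virtual address that falls inside a registered memslot
 * back to its guest physical address.  Returns 1 and fills *phys_addr on
 * success, 0 if no slot covers the address.
 */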
 339int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
 340                                       hwaddr *phys_addr)
 341{
 342    KVMMemoryListener *kml = &s->memory_listener;
 343    int i, ret = 0;
 344
 345    kvm_slots_lock();
 346    for (i = 0; i < s->nr_slots; i++) {
 347        KVMSlot *mem = &kml->slots[i];
 348
 349        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
 350            *phys_addr = mem->start_addr + (ram - mem->ram);
 351            ret = 1;
 352            break;
 353        }
 354    }
 355    kvm_slots_unlock();
 356
 357    return ret;
 358}
 359
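/*
 * Program a single memslot into KVM with KVM_SET_USER_MEMORY_REGION.
 * When the KVM_MEM_READONLY flag of an existing, non-empty slot changes,
 * the slot is first deleted (size 0) and then re-created, since KVM does
 * not allow that flag to be toggled in place (see the comment below).
 */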
 360static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
 361{
 362    KVMState *s = kvm_state;
 363    struct kvm_userspace_memory_region mem;
 364    int ret;
 365
 366    mem.slot = slot->slot | (kml->as_id << 16);
 367    mem.guest_phys_addr = slot->start_addr;
 368    mem.userspace_addr = (unsigned long)slot->ram;
 369    mem.flags = slot->flags;
 370
 371    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
 372        /* Set the slot size to 0 before setting the slot to the desired
 373         * value. This is needed based on KVM commit 75d61fbc. */
 374        mem.memory_size = 0;
 375        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 376        if (ret < 0) {
 377            goto err;
 378        }
 379    }
 380    mem.memory_size = slot->memory_size;
 381    ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 382    slot->old_flags = mem.flags;
 383err:
 384    trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
 385                              mem.memory_size, mem.userspace_addr, ret);
 386    if (ret < 0) {
 387        error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
 388                     " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
 389                     __func__, mem.slot, slot->start_addr,
 390                     (uint64_t)mem.memory_size, strerror(errno));
 391    }
 392    return ret;
 393}
 394
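/*
 * Tear down a vcpu: run the arch-specific destructor, unmap the kvm_run
 * area and the dirty ring (if any), then park the vcpu fd on
 * kvm_parked_vcpus so that kvm_get_vcpu() can reuse it if a vcpu with the
 * same id is created again.
 */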
 395static int do_kvm_destroy_vcpu(CPUState *cpu)
 396{
 397    KVMState *s = kvm_state;
 398    long mmap_size;
 399    struct KVMParkedVcpu *vcpu = NULL;
 400    int ret = 0;
 401
 402    DPRINTF("kvm_destroy_vcpu\n");
 403
 404    ret = kvm_arch_destroy_vcpu(cpu);
 405    if (ret < 0) {
 406        goto err;
 407    }
 408
 409    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 410    if (mmap_size < 0) {
 411        ret = mmap_size;
 412        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
 413        goto err;
 414    }
 415
 416    ret = munmap(cpu->kvm_run, mmap_size);
 417    if (ret < 0) {
 418        goto err;
 419    }
 420
 421    if (cpu->kvm_dirty_gfns) {
 422        ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes);
 423        if (ret < 0) {
 424            goto err;
 425        }
 426    }
 427
 428    vcpu = g_malloc0(sizeof(*vcpu));
 429    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
 430    vcpu->kvm_fd = cpu->kvm_fd;
 431    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
 432err:
 433    return ret;
 434}
 435
 436void kvm_destroy_vcpu(CPUState *cpu)
 437{
 438    if (do_kvm_destroy_vcpu(cpu) < 0) {
 439        error_report("kvm_destroy_vcpu failed");
 440        exit(EXIT_FAILURE);
 441    }
 442}
 443
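/*
 * Return a vcpu fd for vcpu_id, preferring a previously parked fd; only
 * when none is parked is a new one created with KVM_CREATE_VCPU.
 */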
 444static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
 445{
 446    struct KVMParkedVcpu *cpu;
 447
 448    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
 449        if (cpu->vcpu_id == vcpu_id) {
 450            int kvm_fd;
 451
 452            QLIST_REMOVE(cpu, node);
 453            kvm_fd = cpu->kvm_fd;
 454            g_free(cpu);
 455            return kvm_fd;
 456        }
 457    }
 458
 459    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
 460}
 461
 462int kvm_init_vcpu(CPUState *cpu, Error **errp)
 463{
 464    KVMState *s = kvm_state;
 465    long mmap_size;
 466    int ret;
 467
 468    trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
 469
 470    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
 471    if (ret < 0) {
 472        error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
 473                         kvm_arch_vcpu_id(cpu));
 474        goto err;
 475    }
 476
 477    cpu->kvm_fd = ret;
 478    cpu->kvm_state = s;
 479    cpu->vcpu_dirty = true;
 480    cpu->dirty_pages = 0;
 481    cpu->throttle_us_per_full = 0;
 482
 483    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 484    if (mmap_size < 0) {
 485        ret = mmap_size;
 486        error_setg_errno(errp, -mmap_size,
 487                         "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed");
 488        goto err;
 489    }
 490
 491    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 492                        cpu->kvm_fd, 0);
 493    if (cpu->kvm_run == MAP_FAILED) {
 494        ret = -errno;
 495        error_setg_errno(errp, ret,
 496                         "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)",
 497                         kvm_arch_vcpu_id(cpu));
 498        goto err;
 499    }
 500
 501    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
 502        s->coalesced_mmio_ring =
 503            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
 504    }
 505
 506    if (s->kvm_dirty_ring_size) {
 507        /* Use MAP_SHARED to share pages with the kernel */
 508        cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes,
 509                                   PROT_READ | PROT_WRITE, MAP_SHARED,
 510                                   cpu->kvm_fd,
 511                                   PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET);
 512        if (cpu->kvm_dirty_gfns == MAP_FAILED) {
 513            ret = -errno;
 514            DPRINTF("mmap'ing vcpu dirty gfns failed: %d\n", ret);
 515            goto err;
 516        }
 517    }
 518
 519    ret = kvm_arch_init_vcpu(cpu);
 520    if (ret < 0) {
 521        error_setg_errno(errp, -ret,
 522                         "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)",
 523                         kvm_arch_vcpu_id(cpu));
 524    }
 525err:
 526    return ret;
 527}
 528
 529/*
 530 * dirty pages logging control
 531 */
 532
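/*
 * Translate a MemoryRegion's properties into KVM memslot flags: dirty
 * logging if any dirty-log client is active, and read-only for regions
 * that are read-only or in ROMD mode, when the host supports
 * KVM_MEM_READONLY.
 */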
 533static int kvm_mem_flags(MemoryRegion *mr)
 534{
 535    bool readonly = mr->readonly || memory_region_is_romd(mr);
 536    int flags = 0;
 537
 538    if (memory_region_get_dirty_log_mask(mr) != 0) {
 539        flags |= KVM_MEM_LOG_DIRTY_PAGES;
 540    }
 541    if (readonly && kvm_readonly_mem_allowed) {
 542        flags |= KVM_MEM_READONLY;
 543    }
 544    return flags;
 545}
 546
 547/* Called with KVMMemoryListener.slots_lock held */
 548static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
 549                                 MemoryRegion *mr)
 550{
 551    mem->flags = kvm_mem_flags(mr);
 552
 553    /* If nothing changed effectively, no need to issue ioctl */
 554    if (mem->flags == mem->old_flags) {
 555        return 0;
 556    }
 557
 558    kvm_slot_init_dirty_bitmap(mem);
 559    return kvm_set_user_memory_region(kml, mem, false);
 560}
 561
 562static int kvm_section_update_flags(KVMMemoryListener *kml,
 563                                    MemoryRegionSection *section)
 564{
 565    hwaddr start_addr, size, slot_size;
 566    KVMSlot *mem;
 567    int ret = 0;
 568
 569    size = kvm_align_section(section, &start_addr);
 570    if (!size) {
 571        return 0;
 572    }
 573
 574    kvm_slots_lock();
 575
 576    while (size && !ret) {
 577        slot_size = MIN(kvm_max_slot_size, size);
 578        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
 579        if (!mem) {
 580            /* We don't have a slot if we want to trap every access. */
 581            goto out;
 582        }
 583
 584        ret = kvm_slot_update_flags(kml, mem, section->mr);
 585        start_addr += slot_size;
 586        size -= slot_size;
 587    }
 588
 589out:
 590    kvm_slots_unlock();
 591    return ret;
 592}
 593
 594static void kvm_log_start(MemoryListener *listener,
 595                          MemoryRegionSection *section,
 596                          int old, int new)
 597{
 598    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 599    int r;
 600
 601    if (old != 0) {
 602        return;
 603    }
 604
 605    r = kvm_section_update_flags(kml, section);
 606    if (r < 0) {
 607        abort();
 608    }
 609}
 610
 611static void kvm_log_stop(MemoryListener *listener,
 612                          MemoryRegionSection *section,
 613                          int old, int new)
 614{
 615    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 616    int r;
 617
 618    if (new != 0) {
 619        return;
 620    }
 621
 622    r = kvm_section_update_flags(kml, section);
 623    if (r < 0) {
 624        abort();
 625    }
 626}
 627
 628/* get kvm's dirty pages bitmap and update qemu's */
 629static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
 630{
 631    ram_addr_t start = slot->ram_start_offset;
 632    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
 633
 634    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
 635}
 636
 637static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
 638{
 639    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
 640}
 641
 642#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
 643
 644/* Allocate the dirty bitmap for a slot  */
 645static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
 646{
 647    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
 648        return;
 649    }
 650
 651    /*
 652     * XXX bad kernel interface alert
  653     * For the dirty bitmap, the kernel allocates an array whose size is
  654     * aligned to bits-per-long.  But when the kernel is 64-bit and
  655     * userspace is 32-bit, userspace cannot align to the same
  656     * bits-per-long, since sizeof(long) differs between kernel and
  657     * user space.  Userspace would then provide a buffer that may be
  658     * 4 bytes smaller than what the kernel uses, resulting in
  659     * userspace memory corruption (which, in most cases, valgrind
  660     * cannot detect either).
  661     * So for now, align to 64 instead of HOST_LONG_BITS here, in the
  662     * hope that sizeof(long) won't become >8 any time soon.
 663     *
 664     * Note: the granule of kvm dirty log is qemu_real_host_page_size.
 665     * And mem->memory_size is aligned to it (otherwise this mem can't
 666     * be registered to KVM).
 667     */
 668    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
 669                                        /*HOST_LONG_BITS*/ 64) / 8;
 670    mem->dirty_bmap = g_malloc0(bitmap_size);
 671    mem->dirty_bmap_size = bitmap_size;
 672}
 673
 674/*
  675 * Sync the dirty bitmap from the kernel into KVMSlot.dirty_bmap; returns
  676 * true on success, false otherwise.
 677 */
 678static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
 679{
 680    struct kvm_dirty_log d = {};
 681    int ret;
 682
 683    d.dirty_bitmap = slot->dirty_bmap;
 684    d.slot = slot->slot | (slot->as_id << 16);
 685    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);
 686
 687    if (ret == -ENOENT) {
 688        /* kernel does not have dirty bitmap in this slot */
 689        ret = 0;
 690    }
 691    if (ret) {
 692        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
 693                          __func__, ret);
 694    }
 695    return ret == 0;
 696}
 697
  698/* Must be called with the slots_lock held for all address spaces. */
 699static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
 700                                     uint32_t slot_id, uint64_t offset)
 701{
 702    KVMMemoryListener *kml;
 703    KVMSlot *mem;
 704
 705    if (as_id >= s->nr_as) {
 706        return;
 707    }
 708
 709    kml = s->as[as_id].ml;
 710    mem = &kml->slots[slot_id];
 711
 712    if (!mem->memory_size || offset >=
 713        (mem->memory_size / qemu_real_host_page_size())) {
 714        return;
 715    }
 716
 717    set_bit(offset, mem->dirty_bmap);
 718}
 719
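/*
 * Dirty GFN entries follow a simple handshake: the kernel marks an entry
 * with KVM_DIRTY_GFN_F_DIRTY when a page is dirtied, userspace sets
 * KVM_DIRTY_GFN_F_RESET after collecting it, and a later
 * KVM_RESET_DIRTY_RINGS ioctl lets the kernel re-protect the page.
 */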
 720static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
 721{
 722    return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
 723}
 724
 725static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
 726{
 727    gfn->flags = KVM_DIRTY_GFN_F_RESET;
 728}
 729
 730/*
  731 * Must be called with the slots_lock held for all address spaces.  Returns
  732 * the number of dirty pages collected from this vCPU's dirty ring.
 733 */
 734static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
 735{
 736    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
 737    uint32_t ring_size = s->kvm_dirty_ring_size;
 738    uint32_t count = 0, fetch = cpu->kvm_fetch_index;
 739
 740    assert(dirty_gfns && ring_size);
 741    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);
 742
 743    while (true) {
 744        cur = &dirty_gfns[fetch % ring_size];
 745        if (!dirty_gfn_is_dirtied(cur)) {
 746            break;
 747        }
 748        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
 749                                 cur->offset);
 750        dirty_gfn_set_collected(cur);
 751        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
 752        fetch++;
 753        count++;
 754    }
 755    cpu->kvm_fetch_index = fetch;
 756    cpu->dirty_pages += count;
 757
 758    return count;
 759}
 760
  761/* Must be called with the slots_lock held */
 762static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState* cpu)
 763{
 764    int ret;
 765    uint64_t total = 0;
 766    int64_t stamp;
 767
 768    stamp = get_clock();
 769
 770    if (cpu) {
 771        total = kvm_dirty_ring_reap_one(s, cpu);
 772    } else {
 773        CPU_FOREACH(cpu) {
 774            total += kvm_dirty_ring_reap_one(s, cpu);
 775        }
 776    }
 777
 778    if (total) {
 779        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
 780        assert(ret == total);
 781    }
 782
 783    stamp = get_clock() - stamp;
 784
 785    if (total) {
 786        trace_kvm_dirty_ring_reap(total, stamp / 1000);
 787    }
 788
 789    return total;
 790}
 791
 792/*
  793 * Currently, for simplicity, the BQL must be held before calling this.  We
  794 * can consider dropping the BQL once all the race conditions are understood.
 795 */
 796static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
 797{
 798    uint64_t total;
 799
 800    /*
 801     * We need to lock all kvm slots for all address spaces here,
 802     * because:
 803     *
 804     * (1) We need to mark dirty for dirty bitmaps in multiple slots
 805     *     and for tons of pages, so it's better to take the lock here
 806     *     once rather than once per page.  And more importantly,
 807     *
 808     * (2) We must _NOT_ publish dirty bits to the other threads
 809     *     (e.g., the migration thread) via the kvm memory slot dirty
  810     *     bitmaps before correctly re-protecting those dirtied pages.
  811     *     Otherwise we run the risk of data corruption if the page
  812     *     data is read in the other thread before we do the reset
  813     *     below.
 814     */
 815    kvm_slots_lock();
 816    total = kvm_dirty_ring_reap_locked(s, cpu);
 817    kvm_slots_unlock();
 818
 819    return total;
 820}
 821
 822static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
 823{
 824    /* No need to do anything */
 825}
 826
 827/*
  828 * Kick all vcpus out in a synchronized way.  When this returns, every
  829 * vcpu is guaranteed to have been kicked and to have returned to
  830 * userspace at least once.
 831 */
 832static void kvm_cpu_synchronize_kick_all(void)
 833{
 834    CPUState *cpu;
 835
 836    CPU_FOREACH(cpu) {
 837        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
 838    }
 839}
 840
 841/*
 842 * Flush all the existing dirty pages to the KVM slot buffers.  When
  843 * this call returns, we guarantee that all the pages dirtied before
  844 * this function was called have been put into the per-kvmslot
  845 * dirty bitmap.
 846 *
 847 * This function must be called with BQL held.
 848 */
 849static void kvm_dirty_ring_flush(void)
 850{
 851    trace_kvm_dirty_ring_flush(0);
 852    /*
  853     * The function needs to be serialized.  Since it should always
  854     * be called with the BQL held, serialization is guaranteed.
  855     * However, let's be sure of it.
 856     */
 857    assert(qemu_mutex_iothread_locked());
 858    /*
 859     * First make sure to flush the hardware buffers by kicking all
 860     * vcpus out in a synchronous way.
 861     */
 862    kvm_cpu_synchronize_kick_all();
 863    kvm_dirty_ring_reap(kvm_state, NULL);
 864    trace_kvm_dirty_ring_flush(1);
 865}
 866
 867/**
 868 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 869 *
  870 * This function first tries to fetch the dirty bitmap from the kernel,
  871 * and then updates QEMU's dirty bitmap.
  872 *
  873 * NOTE: the caller must hold kml->slots_lock.
 874 *
 875 * @kml: the KVM memory listener object
 876 * @section: the memory section to sync the dirty bitmap with
 877 */
 878static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
 879                                           MemoryRegionSection *section)
 880{
 881    KVMState *s = kvm_state;
 882    KVMSlot *mem;
 883    hwaddr start_addr, size;
 884    hwaddr slot_size;
 885
 886    size = kvm_align_section(section, &start_addr);
 887    while (size) {
 888        slot_size = MIN(kvm_max_slot_size, size);
 889        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
 890        if (!mem) {
 891            /* We don't have a slot if we want to trap every access. */
 892            return;
 893        }
 894        if (kvm_slot_get_dirty_log(s, mem)) {
 895            kvm_slot_sync_dirty_pages(mem);
 896        }
 897        start_addr += slot_size;
 898        size -= slot_size;
 899    }
 900}
 901
 902/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
 903#define KVM_CLEAR_LOG_SHIFT  6
 904#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
 905#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
 906
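/*
 * Clear the dirty bits of one memslot for the byte range
 * [start, start + size) relative to the slot, using KVM_CLEAR_DIRTY_LOG.
 * The range is widened to the 64-page granule required by the kernel
 * (e.g. with 4 KiB host pages the granule is 64 * 4 KiB = 256 KiB); when
 * the requested range is not already aligned, a temporary bitmap is used
 * so that unrelated, not-yet-synced dirty bits are not cleared.
 */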
 907static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
 908                                  uint64_t size)
 909{
 910    KVMState *s = kvm_state;
 911    uint64_t end, bmap_start, start_delta, bmap_npages;
 912    struct kvm_clear_dirty_log d;
 913    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
 914    int ret;
 915
 916    /*
 917     * We need to extend either the start or the size or both to
 918     * satisfy the KVM interface requirement.  Firstly, do the start
 919     * page alignment on 64 host pages
 920     */
 921    bmap_start = start & KVM_CLEAR_LOG_MASK;
 922    start_delta = start - bmap_start;
 923    bmap_start /= psize;
 924
 925    /*
 926     * The kernel interface has restriction on the size too, that either:
 927     *
 928     * (1) the size is 64 host pages aligned (just like the start), or
 929     * (2) the size fills up until the end of the KVM memslot.
 930     */
 931    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
 932        << KVM_CLEAR_LOG_SHIFT;
 933    end = mem->memory_size / psize;
 934    if (bmap_npages > end - bmap_start) {
 935        bmap_npages = end - bmap_start;
 936    }
 937    start_delta /= psize;
 938
 939    /*
 940     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
  941     * that we won't clear any unknown dirty bits, otherwise we might
  942     * accidentally clear some set bits which have not yet been synced
  943     * from the kernel into QEMU's bitmap; we would then lose track of
  944     * guest modifications to those pages (which can directly lead to
  945     * guest data loss or a guest panic after migration).
 946     *
 947     * Layout of the KVMSlot.dirty_bmap:
 948     *
 949     *                   |<-------- bmap_npages -----------..>|
 950     *                                                     [1]
 951     *                     start_delta         size
 952     *  |----------------|-------------|------------------|------------|
 953     *  ^                ^             ^                               ^
 954     *  |                |             |                               |
 955     * start          bmap_start     (start)                         end
 956     * of memslot                                             of memslot
 957     *
 958     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
 959     */
 960
 961    assert(bmap_start % BITS_PER_LONG == 0);
 962    /* We should never do log_clear before log_sync */
 963    assert(mem->dirty_bmap);
 964    if (start_delta || bmap_npages - size / psize) {
 965        /* Slow path - we need to manipulate a temp bitmap */
 966        bmap_clear = bitmap_new(bmap_npages);
 967        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
 968                                    bmap_start, start_delta + size / psize);
 969        /*
  970         * We need to clear the bits for the hole at the start because
  971         * those pages were not requested by the caller; the bitmap was
  972         * extended only to satisfy the 64-page alignment.
 973         */
 974        bitmap_clear(bmap_clear, 0, start_delta);
 975        d.dirty_bitmap = bmap_clear;
 976    } else {
 977        /*
 978         * Fast path - both start and size align well with BITS_PER_LONG
 979         * (or the end of memory slot)
 980         */
 981        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
 982    }
 983
 984    d.first_page = bmap_start;
 985    /* It should never overflow.  If it happens, say something */
 986    assert(bmap_npages <= UINT32_MAX);
 987    d.num_pages = bmap_npages;
 988    d.slot = mem->slot | (as_id << 16);
 989
 990    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
 991    if (ret < 0 && ret != -ENOENT) {
 992        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
 993                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
 994                     __func__, d.slot, (uint64_t)d.first_page,
 995                     (uint32_t)d.num_pages, ret);
 996    } else {
 997        ret = 0;
 998        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
 999    }
1000
1001    /*
 1002     * After updating the remote dirty bitmap, also update the cached
 1003     * bitmap for the memslot.  Then, if another user clears the same
 1004     * region, we know not to clear it again on the remote side, as that
 1005     * would cause data loss as well.
1006     */
1007    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
1008                 size / psize);
1009    /* This handles the NULL case well */
1010    g_free(bmap_clear);
1011    return ret;
1012}
1013
1014
1015/**
1016 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
1017 *
1018 * NOTE: this will be a no-op if we haven't enabled manual dirty log
1019 * protection in the host kernel because in that case this operation
1020 * will be done within log_sync().
1021 *
1022 * @kml:     the kvm memory listener
1023 * @section: the memory range to clear dirty bitmap
1024 */
1025static int kvm_physical_log_clear(KVMMemoryListener *kml,
1026                                  MemoryRegionSection *section)
1027{
1028    KVMState *s = kvm_state;
1029    uint64_t start, size, offset, count;
1030    KVMSlot *mem;
1031    int ret = 0, i;
1032
1033    if (!s->manual_dirty_log_protect) {
1034        /* No need to do explicit clear */
1035        return ret;
1036    }
1037
1038    start = section->offset_within_address_space;
1039    size = int128_get64(section->size);
1040
1041    if (!size) {
1042        /* Nothing more we can do... */
1043        return ret;
1044    }
1045
1046    kvm_slots_lock();
1047
1048    for (i = 0; i < s->nr_slots; i++) {
1049        mem = &kml->slots[i];
1050        /* Discard slots that are empty or do not overlap the section */
1051        if (!mem->memory_size ||
1052            mem->start_addr > start + size - 1 ||
1053            start > mem->start_addr + mem->memory_size - 1) {
1054            continue;
1055        }
1056
1057        if (start >= mem->start_addr) {
 1058            /* The slot starts before the section or is aligned to it.  */
1059            offset = start - mem->start_addr;
1060            count = MIN(mem->memory_size - offset, size);
1061        } else {
 1062            /* The slot starts after the section.  */
1063            offset = 0;
1064            count = MIN(mem->memory_size, size - (mem->start_addr - start));
1065        }
1066        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
1067        if (ret < 0) {
1068            break;
1069        }
1070    }
1071
1072    kvm_slots_unlock();
1073
1074    return ret;
1075}
1076
1077static void kvm_coalesce_mmio_region(MemoryListener *listener,
 1078                                     MemoryRegionSection *section,
1079                                     hwaddr start, hwaddr size)
1080{
1081    KVMState *s = kvm_state;
1082
1083    if (s->coalesced_mmio) {
1084        struct kvm_coalesced_mmio_zone zone;
1085
1086        zone.addr = start;
1087        zone.size = size;
1088        zone.pad = 0;
1089
1090        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1091    }
1092}
1093
1094static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
 1095                                       MemoryRegionSection *section,
1096                                       hwaddr start, hwaddr size)
1097{
1098    KVMState *s = kvm_state;
1099
1100    if (s->coalesced_mmio) {
1101        struct kvm_coalesced_mmio_zone zone;
1102
1103        zone.addr = start;
1104        zone.size = size;
1105        zone.pad = 0;
1106
1107        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
1108    }
1109}
1110
1111static void kvm_coalesce_pio_add(MemoryListener *listener,
1112                                MemoryRegionSection *section,
1113                                hwaddr start, hwaddr size)
1114{
1115    KVMState *s = kvm_state;
1116
1117    if (s->coalesced_pio) {
1118        struct kvm_coalesced_mmio_zone zone;
1119
1120        zone.addr = start;
1121        zone.size = size;
1122        zone.pio = 1;
1123
1124        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
1125    }
1126}
1127
1128static void kvm_coalesce_pio_del(MemoryListener *listener,
1129                                MemoryRegionSection *section,
1130                                hwaddr start, hwaddr size)
1131{
1132    KVMState *s = kvm_state;
1133
1134    if (s->coalesced_pio) {
1135        struct kvm_coalesced_mmio_zone zone;
1136
1137        zone.addr = start;
1138        zone.size = size;
1139        zone.pio = 1;
1140
1141        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 1142    }
1143}
1144
1145static MemoryListener kvm_coalesced_pio_listener = {
1146    .name = "kvm-coalesced-pio",
1147    .coalesced_io_add = kvm_coalesce_pio_add,
1148    .coalesced_io_del = kvm_coalesce_pio_del,
1149};
1150
1151int kvm_check_extension(KVMState *s, unsigned int extension)
1152{
1153    int ret;
1154
1155    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1156    if (ret < 0) {
1157        ret = 0;
1158    }
1159
1160    return ret;
1161}
1162
1163int kvm_vm_check_extension(KVMState *s, unsigned int extension)
1164{
1165    int ret;
1166
1167    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
1168    if (ret < 0) {
1169        /* VM wide version not implemented, use global one instead */
1170        ret = kvm_check_extension(s, extension);
1171    }
1172
1173    return ret;
1174}
1175
1176typedef struct HWPoisonPage {
1177    ram_addr_t ram_addr;
1178    QLIST_ENTRY(HWPoisonPage) list;
1179} HWPoisonPage;
1180
1181static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
1182    QLIST_HEAD_INITIALIZER(hwpoison_page_list);
1183
1184static void kvm_unpoison_all(void *param)
1185{
1186    HWPoisonPage *page, *next_page;
1187
1188    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
1189        QLIST_REMOVE(page, list);
1190        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
1191        g_free(page);
1192    }
1193}
1194
1195void kvm_hwpoison_page_add(ram_addr_t ram_addr)
1196{
1197    HWPoisonPage *page;
1198
1199    QLIST_FOREACH(page, &hwpoison_page_list, list) {
1200        if (page->ram_addr == ram_addr) {
1201            return;
1202        }
1203    }
1204    page = g_new(HWPoisonPage, 1);
1205    page->ram_addr = ram_addr;
1206    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
1207}
1208
1209static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
1210{
1211#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
1212    /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
1213     * endianness, but the memory core hands them in target endianness.
1214     * For example, PPC is always treated as big-endian even if running
1215     * on KVM and on PPC64LE.  Correct here.
1216     */
1217    switch (size) {
1218    case 2:
1219        val = bswap16(val);
1220        break;
1221    case 4:
1222        val = bswap32(val);
1223        break;
1224    }
1225#endif
1226    return val;
1227}
1228
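/*
 * Assign (or deassign, when !assign) an ioeventfd so that KVM signals fd
 * on guest writes to the MMIO address addr, optionally only when the
 * written value matches val (datamatch).
 */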
1229static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
1230                                  bool assign, uint32_t size, bool datamatch)
1231{
1232    int ret;
1233    struct kvm_ioeventfd iofd = {
1234        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1235        .addr = addr,
1236        .len = size,
1237        .flags = 0,
1238        .fd = fd,
1239    };
1240
1241    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
1242                                 datamatch);
1243    if (!kvm_enabled()) {
1244        return -ENOSYS;
1245    }
1246
1247    if (datamatch) {
1248        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1249    }
1250    if (!assign) {
1251        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1252    }
1253
1254    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
1255
1256    if (ret < 0) {
1257        return -errno;
1258    }
1259
1260    return 0;
1261}
1262
1263static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
1264                                 bool assign, uint32_t size, bool datamatch)
1265{
1266    struct kvm_ioeventfd kick = {
1267        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
1268        .addr = addr,
1269        .flags = KVM_IOEVENTFD_FLAG_PIO,
1270        .len = size,
1271        .fd = fd,
1272    };
1273    int r;
1274    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
1275    if (!kvm_enabled()) {
1276        return -ENOSYS;
1277    }
1278    if (datamatch) {
1279        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
1280    }
1281    if (!assign) {
1282        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1283    }
1284    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
1285    if (r < 0) {
1286        return r;
1287    }
1288    return 0;
1289}
1290
1291
1292static int kvm_check_many_ioeventfds(void)
1293{
1294    /* Userspace can use ioeventfd for io notification.  This requires a host
1295     * that supports eventfd(2) and an I/O thread; since eventfd does not
1296     * support SIGIO it cannot interrupt the vcpu.
1297     *
1298     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
1299     * can avoid creating too many ioeventfds.
1300     */
1301#if defined(CONFIG_EVENTFD)
1302    int ioeventfds[7];
1303    int i, ret = 0;
1304    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
1305        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
1306        if (ioeventfds[i] < 0) {
1307            break;
1308        }
1309        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
1310        if (ret < 0) {
1311            close(ioeventfds[i]);
1312            break;
1313        }
1314    }
1315
1316    /* Decide whether many devices are supported or not */
1317    ret = i == ARRAY_SIZE(ioeventfds);
1318
1319    while (i-- > 0) {
1320        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
1321        close(ioeventfds[i]);
1322    }
1323    return ret;
1324#else
1325    return 0;
1326#endif
1327}
1328
1329static const KVMCapabilityInfo *
1330kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
1331{
1332    while (list->name) {
1333        if (!kvm_check_extension(s, list->value)) {
1334            return list;
1335        }
1336        list++;
1337    }
1338    return NULL;
1339}
1340
1341void kvm_set_max_memslot_size(hwaddr max_slot_size)
1342{
1343    g_assert(
1344        ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
1345    );
1346    kvm_max_slot_size = max_slot_size;
1347}
1348
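/*
 * Register (add == true) or unregister (add == false) the KVM memslots
 * backing a memory section.  Sections larger than kvm_max_slot_size are
 * split across multiple slots; on removal, any remaining dirty bits are
 * synced into QEMU's bitmap first (best effort, see the note below).
 */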
1349static void kvm_set_phys_mem(KVMMemoryListener *kml,
1350                             MemoryRegionSection *section, bool add)
1351{
1352    KVMSlot *mem;
1353    int err;
1354    MemoryRegion *mr = section->mr;
1355    bool writable = !mr->readonly && !mr->rom_device;
1356    hwaddr start_addr, size, slot_size, mr_offset;
1357    ram_addr_t ram_start_offset;
1358    void *ram;
1359
1360    if (!memory_region_is_ram(mr)) {
1361        if (writable || !kvm_readonly_mem_allowed) {
1362            return;
1363        } else if (!mr->romd_mode) {
1364            /* If the memory device is not in romd_mode, then we actually want
1365             * to remove the kvm memory slot so all accesses will trap. */
1366            add = false;
1367        }
1368    }
1369
1370    size = kvm_align_section(section, &start_addr);
1371    if (!size) {
1372        return;
1373    }
1374
1375    /* The offset of the kvmslot within the memory region */
1376    mr_offset = section->offset_within_region + start_addr -
1377        section->offset_within_address_space;
1378
1379    /* use aligned delta to align the ram address and offset */
1380    ram = memory_region_get_ram_ptr(mr) + mr_offset;
1381    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;
1382
1383    kvm_slots_lock();
1384
1385    if (!add) {
1386        do {
1387            slot_size = MIN(kvm_max_slot_size, size);
1388            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
1389            if (!mem) {
1390                goto out;
1391            }
1392            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1393                /*
 1394                 * NOTE: here we only make a best-effort attempt to sync
 1395                 * the dirty bits.  Whether we're using the dirty log or
 1396                 * the dirty ring, two facts are ignored:
1397                 *
1398                 * (1) dirty bits can reside in hardware buffers (PML)
1399                 *
1400                 * (2) after we collected dirty bits here, pages can be dirtied
1401                 * again before we do the final KVM_SET_USER_MEMORY_REGION to
1402                 * remove the slot.
1403                 *
 1404                 * Not easy.  Let's cross our fingers until it's fixed.
1405                 */
1406                if (kvm_state->kvm_dirty_ring_size) {
1407                    kvm_dirty_ring_reap_locked(kvm_state, NULL);
1408                } else {
1409                    kvm_slot_get_dirty_log(kvm_state, mem);
1410                }
1411                kvm_slot_sync_dirty_pages(mem);
1412            }
1413
1414            /* unregister the slot */
1415            g_free(mem->dirty_bmap);
1416            mem->dirty_bmap = NULL;
1417            mem->memory_size = 0;
1418            mem->flags = 0;
1419            err = kvm_set_user_memory_region(kml, mem, false);
1420            if (err) {
1421                fprintf(stderr, "%s: error unregistering slot: %s\n",
1422                        __func__, strerror(-err));
1423                abort();
1424            }
1425            start_addr += slot_size;
1426            size -= slot_size;
1427        } while (size);
1428        goto out;
1429    }
1430
1431    /* register the new slot */
1432    do {
1433        slot_size = MIN(kvm_max_slot_size, size);
1434        mem = kvm_alloc_slot(kml);
1435        mem->as_id = kml->as_id;
1436        mem->memory_size = slot_size;
1437        mem->start_addr = start_addr;
1438        mem->ram_start_offset = ram_start_offset;
1439        mem->ram = ram;
1440        mem->flags = kvm_mem_flags(mr);
1441        kvm_slot_init_dirty_bitmap(mem);
1442        err = kvm_set_user_memory_region(kml, mem, true);
1443        if (err) {
1444            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
1445                    strerror(-err));
1446            abort();
1447        }
1448        start_addr += slot_size;
1449        ram_start_offset += slot_size;
1450        ram += slot_size;
1451        size -= slot_size;
1452    } while (size);
1453
1454out:
1455    kvm_slots_unlock();
1456}
1457
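/*
 * Body of the dirty ring reaper thread: wake up roughly once per second
 * and, unless the dirty page rate limiter is in service, take the BQL and
 * reap every vcpu's dirty ring into the per-slot dirty bitmaps.
 */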
1458static void *kvm_dirty_ring_reaper_thread(void *data)
1459{
1460    KVMState *s = data;
1461    struct KVMDirtyRingReaper *r = &s->reaper;
1462
1463    rcu_register_thread();
1464
1465    trace_kvm_dirty_ring_reaper("init");
1466
1467    while (true) {
1468        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
1469        trace_kvm_dirty_ring_reaper("wait");
1470        /*
1471         * TODO: provide a smarter timeout rather than a constant?
1472         */
1473        sleep(1);
1474
 1475        /* keep sleeping so that dirtylimit is not disturbed by the reaper */
1476        if (dirtylimit_in_service()) {
1477            continue;
1478        }
1479
1480        trace_kvm_dirty_ring_reaper("wakeup");
1481        r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;
1482
1483        qemu_mutex_lock_iothread();
1484        kvm_dirty_ring_reap(s, NULL);
1485        qemu_mutex_unlock_iothread();
1486
1487        r->reaper_iteration++;
1488    }
1489
1490    trace_kvm_dirty_ring_reaper("exit");
1491
1492    rcu_unregister_thread();
1493
1494    return NULL;
1495}
1496
1497static int kvm_dirty_ring_reaper_init(KVMState *s)
1498{
1499    struct KVMDirtyRingReaper *r = &s->reaper;
1500
1501    qemu_thread_create(&r->reaper_thr, "kvm-reaper",
1502                       kvm_dirty_ring_reaper_thread,
1503                       s, QEMU_THREAD_JOINABLE);
1504
1505    return 0;
1506}
1507
1508static void kvm_region_add(MemoryListener *listener,
1509                           MemoryRegionSection *section)
1510{
1511    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1512
1513    memory_region_ref(section->mr);
1514    kvm_set_phys_mem(kml, section, true);
1515}
1516
1517static void kvm_region_del(MemoryListener *listener,
1518                           MemoryRegionSection *section)
1519{
1520    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1521
1522    kvm_set_phys_mem(kml, section, false);
1523    memory_region_unref(section->mr);
1524}
1525
1526static void kvm_log_sync(MemoryListener *listener,
1527                         MemoryRegionSection *section)
1528{
1529    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1530
1531    kvm_slots_lock();
1532    kvm_physical_sync_dirty_bitmap(kml, section);
1533    kvm_slots_unlock();
1534}
1535
1536static void kvm_log_sync_global(MemoryListener *l)
1537{
1538    KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
1539    KVMState *s = kvm_state;
1540    KVMSlot *mem;
1541    int i;
1542
1543    /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
1544    kvm_dirty_ring_flush();
1545
1546    /*
1547     * TODO: make this faster when nr_slots is big while there are
1548     * only a few used slots (small VMs).
1549     */
1550    kvm_slots_lock();
1551    for (i = 0; i < s->nr_slots; i++) {
1552        mem = &kml->slots[i];
1553        if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1554            kvm_slot_sync_dirty_pages(mem);
1555            /*
1556             * This is not needed by KVM_GET_DIRTY_LOG because the
1557             * ioctl will unconditionally overwrite the whole region.
 1558             * However, the KVM dirty ring has no such side effect.
1559             */
1560            kvm_slot_reset_dirty_pages(mem);
1561        }
1562    }
1563    kvm_slots_unlock();
1564}
1565
1566static void kvm_log_clear(MemoryListener *listener,
1567                          MemoryRegionSection *section)
1568{
1569    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
1570    int r;
1571
1572    r = kvm_physical_log_clear(kml, section);
1573    if (r < 0) {
1574        error_report_once("%s: kvm log clear failed: mr=%s "
1575                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
1576                          section->mr->name, section->offset_within_region,
1577                          int128_get64(section->size));
1578        abort();
1579    }
1580}
1581
1582static void kvm_mem_ioeventfd_add(MemoryListener *listener,
1583                                  MemoryRegionSection *section,
1584                                  bool match_data, uint64_t data,
1585                                  EventNotifier *e)
1586{
1587    int fd = event_notifier_get_fd(e);
1588    int r;
1589
1590    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1591                               data, true, int128_get64(section->size),
1592                               match_data);
1593    if (r < 0) {
1594        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1595                __func__, strerror(-r), -r);
1596        abort();
1597    }
1598}
1599
1600static void kvm_mem_ioeventfd_del(MemoryListener *listener,
1601                                  MemoryRegionSection *section,
1602                                  bool match_data, uint64_t data,
1603                                  EventNotifier *e)
1604{
1605    int fd = event_notifier_get_fd(e);
1606    int r;
1607
1608    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
1609                               data, false, int128_get64(section->size),
1610                               match_data);
1611    if (r < 0) {
1612        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1613                __func__, strerror(-r), -r);
1614        abort();
1615    }
1616}
1617
1618static void kvm_io_ioeventfd_add(MemoryListener *listener,
1619                                 MemoryRegionSection *section,
1620                                 bool match_data, uint64_t data,
1621                                 EventNotifier *e)
1622{
1623    int fd = event_notifier_get_fd(e);
1624    int r;
1625
1626    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1627                              data, true, int128_get64(section->size),
1628                              match_data);
1629    if (r < 0) {
1630        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
1631                __func__, strerror(-r), -r);
1632        abort();
1633    }
1634}
1635
1636static void kvm_io_ioeventfd_del(MemoryListener *listener,
1637                                 MemoryRegionSection *section,
1638                                 bool match_data, uint64_t data,
1639                                 EventNotifier *e)
1640
1641{
1642    int fd = event_notifier_get_fd(e);
1643    int r;
1644
1645    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
1646                              data, false, int128_get64(section->size),
1647                              match_data);
1648    if (r < 0) {
1649        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
1650                __func__, strerror(-r), -r);
1651        abort();
1652    }
1653}
1654
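/*
 * Allocate the slot array for a KVM memory listener and register it on an
 * address space.  With the dirty ring enabled the listener provides the
 * global log_sync_global hook; otherwise it provides per-section log_sync
 * and log_clear.
 */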
1655void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
1656                                  AddressSpace *as, int as_id, const char *name)
1657{
1658    int i;
1659
1660    kml->slots = g_new0(KVMSlot, s->nr_slots);
1661    kml->as_id = as_id;
1662
1663    for (i = 0; i < s->nr_slots; i++) {
1664        kml->slots[i].slot = i;
1665    }
1666
1667    kml->listener.region_add = kvm_region_add;
1668    kml->listener.region_del = kvm_region_del;
1669    kml->listener.log_start = kvm_log_start;
1670    kml->listener.log_stop = kvm_log_stop;
1671    kml->listener.priority = 10;
1672    kml->listener.name = name;
1673
1674    if (s->kvm_dirty_ring_size) {
1675        kml->listener.log_sync_global = kvm_log_sync_global;
1676    } else {
1677        kml->listener.log_sync = kvm_log_sync;
1678        kml->listener.log_clear = kvm_log_clear;
1679    }
1680
1681    memory_listener_register(&kml->listener, as);
1682
1683    for (i = 0; i < s->nr_as; ++i) {
1684        if (!s->as[i].as) {
1685            s->as[i].as = as;
1686            s->as[i].ml = kml;
1687            break;
1688        }
1689    }
1690}
1691
1692static MemoryListener kvm_io_listener = {
1693    .name = "kvm-io",
1694    .eventfd_add = kvm_io_ioeventfd_add,
1695    .eventfd_del = kvm_io_ioeventfd_del,
1696    .priority = 10,
1697};
1698
1699int kvm_set_irq(KVMState *s, int irq, int level)
1700{
1701    struct kvm_irq_level event;
1702    int ret;
1703
1704    assert(kvm_async_interrupts_enabled());
1705
1706    event.level = level;
1707    event.irq = irq;
1708    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1709    if (ret < 0) {
1710        perror("kvm_set_irq");
1711        abort();
1712    }
1713
1714    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1715}
1716
1717#ifdef KVM_CAP_IRQ_ROUTING
1718typedef struct KVMMSIRoute {
1719    struct kvm_irq_routing_entry kroute;
1720    QTAILQ_ENTRY(KVMMSIRoute) entry;
1721} KVMMSIRoute;
1722
1723static void set_gsi(KVMState *s, unsigned int gsi)
1724{
1725    set_bit(gsi, s->used_gsi_bitmap);
1726}
1727
1728static void clear_gsi(KVMState *s, unsigned int gsi)
1729{
1730    clear_bit(gsi, s->used_gsi_bitmap);
1731}
1732
1733void kvm_init_irq_routing(KVMState *s)
1734{
1735    int gsi_count, i;
1736
1737    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1738    if (gsi_count > 0) {
1739        /* Round up so we can search ints using ffs */
1740        s->used_gsi_bitmap = bitmap_new(gsi_count);
1741        s->gsi_count = gsi_count;
1742    }
1743
1744    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1745    s->nr_allocated_irq_routes = 0;
1746
1747    if (!kvm_direct_msi_allowed) {
1748        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
1749            QTAILQ_INIT(&s->msi_hashtab[i]);
1750        }
1751    }
1752
1753    kvm_arch_init_irq_routing(s);
1754}
1755
1756void kvm_irqchip_commit_routes(KVMState *s)
1757{
1758    int ret;
1759
1760    if (kvm_gsi_direct_mapping()) {
1761        return;
1762    }
1763
1764    if (!kvm_gsi_routing_enabled()) {
1765        return;
1766    }
1767
1768    s->irq_routes->flags = 0;
1769    trace_kvm_irqchip_commit_routes();
1770    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1771    assert(ret == 0);
1772}
1773
1774static void kvm_add_routing_entry(KVMState *s,
1775                                  struct kvm_irq_routing_entry *entry)
1776{
1777    struct kvm_irq_routing_entry *new;
1778    int n, size;
1779
1780    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1781        n = s->nr_allocated_irq_routes * 2;
1782        if (n < 64) {
1783            n = 64;
1784        }
1785        size = sizeof(struct kvm_irq_routing);
1786        size += n * sizeof(*new);
1787        s->irq_routes = g_realloc(s->irq_routes, size);
1788        s->nr_allocated_irq_routes = n;
1789    }
1790    n = s->irq_routes->nr++;
1791    new = &s->irq_routes->entries[n];
1792
1793    *new = *entry;
1794
1795    set_gsi(s, entry->gsi);
1796}
1797
1798static int kvm_update_routing_entry(KVMState *s,
1799                                    struct kvm_irq_routing_entry *new_entry)
1800{
1801    struct kvm_irq_routing_entry *entry;
1802    int n;
1803
1804    for (n = 0; n < s->irq_routes->nr; n++) {
1805        entry = &s->irq_routes->entries[n];
1806        if (entry->gsi != new_entry->gsi) {
1807            continue;
1808        }
1809
1810        if (!memcmp(entry, new_entry, sizeof(*entry))) {
1811            return 0;
1812        }
1813
1814        *entry = *new_entry;
1815
1816        return 0;
1817    }
1818
1819    return -ESRCH;
1820}
1821
1822void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1823{
1824    struct kvm_irq_routing_entry e = {};
1825
1826    assert(pin < s->gsi_count);
1827
1828    e.gsi = irq;
1829    e.type = KVM_IRQ_ROUTING_IRQCHIP;
1830    e.flags = 0;
1831    e.u.irqchip.irqchip = irqchip;
1832    e.u.irqchip.pin = pin;
1833    kvm_add_routing_entry(s, &e);
1834}
1835
1836void kvm_irqchip_release_virq(KVMState *s, int virq)
1837{
1838    struct kvm_irq_routing_entry *e;
1839    int i;
1840
1841    if (kvm_gsi_direct_mapping()) {
1842        return;
1843    }
1844
1845    for (i = 0; i < s->irq_routes->nr; i++) {
1846        e = &s->irq_routes->entries[i];
1847        if (e->gsi == virq) {
1848            s->irq_routes->nr--;
1849            *e = s->irq_routes->entries[s->irq_routes->nr];
1850        }
1851    }
1852    clear_gsi(s, virq);
1853    kvm_arch_release_virq_post(virq);
1854    trace_kvm_irqchip_release_virq(virq);
1855}
1856
1857void kvm_irqchip_add_change_notifier(Notifier *n)
1858{
1859    notifier_list_add(&kvm_irqchip_change_notifiers, n);
1860}
1861
1862void kvm_irqchip_remove_change_notifier(Notifier *n)
1863{
1864    notifier_remove(n);
1865}
1866
1867void kvm_irqchip_change_notify(void)
1868{
1869    notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
1870}
1871
1872static unsigned int kvm_hash_msi(uint32_t data)
1873{
1874    /* This is optimized for IA32 MSI layout. However, no other arch shall
1875     * repeat the mistake of not providing a direct MSI injection API. */
1876    return data & 0xff;
1877}
1878
1879static void kvm_flush_dynamic_msi_routes(KVMState *s)
1880{
1881    KVMMSIRoute *route, *next;
1882    unsigned int hash;
1883
1884    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1885        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1886            kvm_irqchip_release_virq(s, route->kroute.gsi);
1887            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1888            g_free(route);
1889        }
1890    }
1891}
1892
1893static int kvm_irqchip_get_virq(KVMState *s)
1894{
1895    int next_virq;
1896
1897    /*
1898     * PIC and IOAPIC share the first 16 GSI numbers, so there are more
1899     * available GSI numbers than IRQ routes. Allocating a GSI number can
1900     * therefore succeed even though a new route entry cannot be added.
1901     * When this happens, flush dynamic MSI entries to free IRQ route entries.
1902     */
1903    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
1904        kvm_flush_dynamic_msi_routes(s);
1905    }
1906
1907    /* Return the lowest unused GSI in the bitmap */
1908    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
1909    if (next_virq >= s->gsi_count) {
1910        return -ENOSPC;
1911    } else {
1912        return next_virq;
1913    }
1914}
1915
1916static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1917{
1918    unsigned int hash = kvm_hash_msi(msg.data);
1919    KVMMSIRoute *route;
1920
1921    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1922        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1923            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1924            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1925            return route;
1926        }
1927    }
1928    return NULL;
1929}
1930
1931int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1932{
1933    struct kvm_msi msi;
1934    KVMMSIRoute *route;
1935
1936    if (kvm_direct_msi_allowed) {
1937        msi.address_lo = (uint32_t)msg.address;
1938        msi.address_hi = msg.address >> 32;
1939        msi.data = le32_to_cpu(msg.data);
1940        msi.flags = 0;
1941        memset(msi.pad, 0, sizeof(msi.pad));
1942
1943        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
1944    }
1945
1946    route = kvm_lookup_msi_route(s, msg);
1947    if (!route) {
1948        int virq;
1949
1950        virq = kvm_irqchip_get_virq(s);
1951        if (virq < 0) {
1952            return virq;
1953        }
1954
1955        route = g_new0(KVMMSIRoute, 1);
1956        route->kroute.gsi = virq;
1957        route->kroute.type = KVM_IRQ_ROUTING_MSI;
1958        route->kroute.flags = 0;
1959        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1960        route->kroute.u.msi.address_hi = msg.address >> 32;
1961        route->kroute.u.msi.data = le32_to_cpu(msg.data);
1962
1963        kvm_add_routing_entry(s, &route->kroute);
1964        kvm_irqchip_commit_routes(s);
1965
1966        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1967                           entry);
1968    }
1969
1970    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1971
1972    return kvm_set_irq(s, route->kroute.gsi, 1);
1973}
1974
1975int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
1976{
1977    struct kvm_irq_routing_entry kroute = {};
1978    int virq;
1979    KVMState *s = c->s;
1980    MSIMessage msg = {0, 0};
1981
1982    if (pci_available && dev) {
1983        msg = pci_get_msi_message(dev, vector);
1984    }
1985
1986    if (kvm_gsi_direct_mapping()) {
1987        return kvm_arch_msi_data_to_gsi(msg.data);
1988    }
1989
1990    if (!kvm_gsi_routing_enabled()) {
1991        return -ENOSYS;
1992    }
1993
1994    virq = kvm_irqchip_get_virq(s);
1995    if (virq < 0) {
1996        return virq;
1997    }
1998
1999    kroute.gsi = virq;
2000    kroute.type = KVM_IRQ_ROUTING_MSI;
2001    kroute.flags = 0;
2002    kroute.u.msi.address_lo = (uint32_t)msg.address;
2003    kroute.u.msi.address_hi = msg.address >> 32;
2004    kroute.u.msi.data = le32_to_cpu(msg.data);
2005    if (pci_available && kvm_msi_devid_required()) {
2006        kroute.flags = KVM_MSI_VALID_DEVID;
2007        kroute.u.msi.devid = pci_requester_id(dev);
2008    }
2009    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2010        kvm_irqchip_release_virq(s, virq);
2011        return -EINVAL;
2012    }
2013
2014    trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
2015                                    vector, virq);
2016
2017    kvm_add_routing_entry(s, &kroute);
2018    kvm_arch_add_msi_route_post(&kroute, vector, dev);
2019    c->changes++;
2020
2021    return virq;
2022}
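
/*
 * Usage sketch: callers such as PCI device models typically allocate an MSI
 * route inside a KVMRouteChange transaction and then attach an irqfd to the
 * returned virq.  This assumes the kvm_irqchip_begin_route_changes() and
 * kvm_irqchip_commit_route_changes() helpers that accompany KVMRouteChange;
 * "vector", "pci_dev" and "notifier" stand in for the caller's own state:
 *
 *     KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
 *     int virq = kvm_irqchip_add_msi_route(&c, vector, pci_dev);
 *
 *     if (virq >= 0) {
 *         kvm_irqchip_commit_route_changes(&c);
 *         kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &notifier, NULL, virq);
 *     }
 */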
2023
2024int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
2025                                 PCIDevice *dev)
2026{
2027    struct kvm_irq_routing_entry kroute = {};
2028
2029    if (kvm_gsi_direct_mapping()) {
2030        return 0;
2031    }
2032
2033    if (!kvm_irqchip_in_kernel()) {
2034        return -ENOSYS;
2035    }
2036
2037    kroute.gsi = virq;
2038    kroute.type = KVM_IRQ_ROUTING_MSI;
2039    kroute.flags = 0;
2040    kroute.u.msi.address_lo = (uint32_t)msg.address;
2041    kroute.u.msi.address_hi = msg.address >> 32;
2042    kroute.u.msi.data = le32_to_cpu(msg.data);
2043    if (pci_available && kvm_msi_devid_required()) {
2044        kroute.flags = KVM_MSI_VALID_DEVID;
2045        kroute.u.msi.devid = pci_requester_id(dev);
2046    }
2047    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
2048        return -EINVAL;
2049    }
2050
2051    trace_kvm_irqchip_update_msi_route(virq);
2052
2053    return kvm_update_routing_entry(s, &kroute);
2054}
2055
2056static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2057                                    EventNotifier *resample, int virq,
2058                                    bool assign)
2059{
2060    int fd = event_notifier_get_fd(event);
2061    int rfd = resample ? event_notifier_get_fd(resample) : -1;
2062
2063    struct kvm_irqfd irqfd = {
2064        .fd = fd,
2065        .gsi = virq,
2066        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
2067    };
2068
2069    if (rfd != -1) {
2070        assert(assign);
2071        if (kvm_irqchip_is_split()) {
2072            /*
2073             * When the slow irqchip (e.g. IOAPIC) lives in userspace,
2074             * the KVM kernel resamplefd mechanism will not work, because
2075             * the EOI of the interrupt is delivered to userspace instead,
2076             * so the kernel-side resamplefd kick is skipped.  Userspace
2077             * here mimics what the kernel provides with resamplefd: we
2078             * remember the resamplefd and kick it when we receive the
2079             * EOI of this IRQ.
2080             *
2081             * This is hackery because the IOAPIC is mostly bypassed
2082             * (except for EOI broadcasts) when irqfd is used.  However,
2083             * it brings back much of the performance for split irqchip
2084             * with INTx IRQs (for VFIO, this gives 93% of the perf of
2085             * the full fast path, a 46% boost compared to the INTx
2086             * slow path).
2087             */
2088            kvm_resample_fd_insert(virq, resample);
2089        } else {
2090            irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
2091            irqfd.resamplefd = rfd;
2092        }
2093    } else if (!assign) {
2094        if (kvm_irqchip_is_split()) {
2095            kvm_resample_fd_remove(virq);
2096        }
2097    }
2098
2099    if (!kvm_irqfds_enabled()) {
2100        return -ENOSYS;
2101    }
2102
2103    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
2104}
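
/*
 * Usage sketch: a device that needs level-triggered (INTx-style) delivery
 * through an irqfd pairs a trigger notifier with a resample notifier via the
 * wrappers further below; "trigger", "resample" and "virq" are placeholders
 * for the caller's own state:
 *
 *     EventNotifier trigger, resample;
 *
 *     event_notifier_init(&trigger, 0);
 *     event_notifier_init(&resample, 0);
 *     if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &trigger,
 *                                            &resample, virq) < 0) {
 *         ... fall back to userspace injection ...
 *     }
 *     ...
 *     kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &trigger, virq);
 */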
2105
2106int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2107{
2108    struct kvm_irq_routing_entry kroute = {};
2109    int virq;
2110
2111    if (!kvm_gsi_routing_enabled()) {
2112        return -ENOSYS;
2113    }
2114
2115    virq = kvm_irqchip_get_virq(s);
2116    if (virq < 0) {
2117        return virq;
2118    }
2119
2120    kroute.gsi = virq;
2121    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
2122    kroute.flags = 0;
2123    kroute.u.adapter.summary_addr = adapter->summary_addr;
2124    kroute.u.adapter.ind_addr = adapter->ind_addr;
2125    kroute.u.adapter.summary_offset = adapter->summary_offset;
2126    kroute.u.adapter.ind_offset = adapter->ind_offset;
2127    kroute.u.adapter.adapter_id = adapter->adapter_id;
2128
2129    kvm_add_routing_entry(s, &kroute);
2130
2131    return virq;
2132}
2133
2134int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2135{
2136    struct kvm_irq_routing_entry kroute = {};
2137    int virq;
2138
2139    if (!kvm_gsi_routing_enabled()) {
2140        return -ENOSYS;
2141    }
2142    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
2143        return -ENOSYS;
2144    }
2145    virq = kvm_irqchip_get_virq(s);
2146    if (virq < 0) {
2147        return virq;
2148    }
2149
2150    kroute.gsi = virq;
2151    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
2152    kroute.flags = 0;
2153    kroute.u.hv_sint.vcpu = vcpu;
2154    kroute.u.hv_sint.sint = sint;
2155
2156    kvm_add_routing_entry(s, &kroute);
2157    kvm_irqchip_commit_routes(s);
2158
2159    return virq;
2160}
2161
2162#else /* !KVM_CAP_IRQ_ROUTING */
2163
2164void kvm_init_irq_routing(KVMState *s)
2165{
2166}
2167
2168void kvm_irqchip_release_virq(KVMState *s, int virq)
2169{
2170}
2171
2172int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
2173{
2174    abort();
2175}
2176
2177int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
2178{
2179    return -ENOSYS;
2180}
2181
2182int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
2183{
2184    return -ENOSYS;
2185}
2186
2187int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
2188{
2189    return -ENOSYS;
2190}
2191
2192static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
2193                                    EventNotifier *resample, int virq,
2194                                    bool assign)
2195{
2196    abort();
2197}
2198
2199int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                     PCIDevice *dev)
2200{
2201    return -ENOSYS;
2202}
2203#endif /* !KVM_CAP_IRQ_ROUTING */
2204
2205int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2206                                       EventNotifier *rn, int virq)
2207{
2208    return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
2209}
2210
2211int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
2212                                          int virq)
2213{
2214    return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
2215}
2216
2217int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
2218                                   EventNotifier *rn, qemu_irq irq)
2219{
2220    gpointer key, gsi;
2221    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2222
2223    if (!found) {
2224        return -ENXIO;
2225    }
2226    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
2227}
2228
2229int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
2230                                      qemu_irq irq)
2231{
2232    gpointer key, gsi;
2233    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
2234
2235    if (!found) {
2236        return -ENXIO;
2237    }
2238    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
2239}
2240
2241void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
2242{
2243    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
2244}
2245
2246static void kvm_irqchip_create(KVMState *s)
2247{
2248    int ret;
2249
2250    assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
2251    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
2252        ;
2253    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
2254        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
2255        if (ret < 0) {
2256            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
2257            exit(1);
2258        }
2259    } else {
2260        return;
2261    }
2262
2263    /* First probe and see if there's an arch-specific hook to create the
2264     * in-kernel irqchip for us */
2265    ret = kvm_arch_irqchip_create(s);
2266    if (ret == 0) {
2267        if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
2268            error_report("Split IRQ chip mode not supported.");
2269            exit(1);
2270        } else {
2271            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
2272        }
2273    }
2274    if (ret < 0) {
2275        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
2276        exit(1);
2277    }
2278
2279    kvm_kernel_irqchip = true;
2280    /* If we have an in-kernel IRQ chip then we must have asynchronous
2281     * interrupt delivery (though the reverse is not necessarily true)
2282     */
2283    kvm_async_interrupts_allowed = true;
2284    kvm_halt_in_kernel_allowed = true;
2285
2286    kvm_init_irq_routing(s);
2287
2288    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
2289}
2290
2291/* Find number of supported CPUs using the recommended
2292 * procedure from the kernel API documentation to cope with
2293 * older kernels that may be missing capabilities.
2294 */
2295static int kvm_recommended_vcpus(KVMState *s)
2296{
2297    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
2298    return (ret) ? ret : 4;
2299}
2300
2301static int kvm_max_vcpus(KVMState *s)
2302{
2303    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
2304    return (ret) ? ret : kvm_recommended_vcpus(s);
2305}
2306
2307static int kvm_max_vcpu_id(KVMState *s)
2308{
2309    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
2310    return (ret) ? ret : kvm_max_vcpus(s);
2311}
2312
2313bool kvm_vcpu_id_is_valid(int vcpu_id)
2314{
2315    KVMState *s = KVM_STATE(current_accel());
2316    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
2317}
2318
2319bool kvm_dirty_ring_enabled(void)
2320{
2321    return kvm_state->kvm_dirty_ring_size ? true : false;
2322}
2323
2324static void query_stats_cb(StatsResultList **result, StatsTarget target,
2325                           strList *names, strList *targets, Error **errp);
2326static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp);
2327
2328uint32_t kvm_dirty_ring_size(void)
2329{
2330    return kvm_state->kvm_dirty_ring_size;
2331}
2332
2333static int kvm_init(MachineState *ms)
2334{
2335    MachineClass *mc = MACHINE_GET_CLASS(ms);
2336    static const char upgrade_note[] =
2337        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
2338        "(see http://sourceforge.net/projects/kvm).\n";
2339    struct {
2340        const char *name;
2341        int num;
2342    } num_cpus[] = {
2343        { "SMP",          ms->smp.cpus },
2344        { "hotpluggable", ms->smp.max_cpus },
2345        { NULL, }
2346    }, *nc = num_cpus;
2347    int soft_vcpus_limit, hard_vcpus_limit;
2348    KVMState *s;
2349    const KVMCapabilityInfo *missing_cap;
2350    int ret;
2351    int type = 0;
2352    uint64_t dirty_log_manual_caps;
2353
2354    qemu_mutex_init(&kml_slots_lock);
2355
2356    s = KVM_STATE(ms->accelerator);
2357
2358    /*
2359     * On systems where the kernel can support different base page
2360     * sizes, host page size may be different from TARGET_PAGE_SIZE,
2361     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
2362     * page size for the system though.
2363     */
2364    assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size());
2365
2366    s->sigmask_len = 8;
2367
2368#ifdef KVM_CAP_SET_GUEST_DEBUG
2369    QTAILQ_INIT(&s->kvm_sw_breakpoints);
2370#endif
2371    QLIST_INIT(&s->kvm_parked_vcpus);
2372    s->fd = qemu_open_old("/dev/kvm", O_RDWR);
2373    if (s->fd == -1) {
2374        fprintf(stderr, "Could not access KVM kernel module: %m\n");
2375        ret = -errno;
2376        goto err;
2377    }
2378
2379    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
2380    if (ret < KVM_API_VERSION) {
2381        if (ret >= 0) {
2382            ret = -EINVAL;
2383        }
2384        fprintf(stderr, "kvm version too old\n");
2385        goto err;
2386    }
2387
2388    if (ret > KVM_API_VERSION) {
2389        ret = -EINVAL;
2390        fprintf(stderr, "kvm version not supported\n");
2391        goto err;
2392    }
2393
2394    kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
2395    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2396
2397    /* If unspecified, use the default value */
2398    if (!s->nr_slots) {
2399        s->nr_slots = 32;
2400    }
2401
2402    s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
2403    if (s->nr_as <= 1) {
2404        s->nr_as = 1;
2405    }
2406    s->as = g_new0(struct KVMAs, s->nr_as);
2407
2408    if (object_property_find(OBJECT(current_machine), "kvm-type")) {
2409        g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine),
2410                                                            "kvm-type",
2411                                                            &error_abort);
2412        type = mc->kvm_type(ms, kvm_type);
2413    } else if (mc->kvm_type) {
2414        type = mc->kvm_type(ms, NULL);
2415    }
2416
2417    do {
2418        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
2419    } while (ret == -EINTR);
2420
2421    if (ret < 0) {
2422        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
2423                strerror(-ret));
2424
2425#ifdef TARGET_S390X
2426        if (ret == -EINVAL) {
2427            fprintf(stderr,
2428                    "Host kernel setup problem detected. Please verify:\n");
2429            fprintf(stderr, "- for kernels supporting the switch_amode or"
2430                    " user_mode parameters, whether\n");
2431            fprintf(stderr,
2432                    "  user space is running in primary address space\n");
2433            fprintf(stderr,
2434                    "- for kernels supporting the vm.allocate_pgste sysctl, "
2435                    "whether it is enabled\n");
2436        }
2437#elif defined(TARGET_PPC)
2438        if (ret == -EINVAL) {
2439            fprintf(stderr,
2440                    "PPC KVM module is not loaded. Try modprobe kvm_%s.\n",
2441                    (type == 2) ? "pr" : "hv");
2442        }
2443#endif
2444        goto err;
2445    }
2446
2447    s->vmfd = ret;
2448
2449    /* check the vcpu limits */
2450    soft_vcpus_limit = kvm_recommended_vcpus(s);
2451    hard_vcpus_limit = kvm_max_vcpus(s);
2452
2453    while (nc->name) {
2454        if (nc->num > soft_vcpus_limit) {
2455            warn_report("Number of %s cpus requested (%d) exceeds "
2456                        "the recommended number of cpus supported by KVM (%d)",
2457                        nc->name, nc->num, soft_vcpus_limit);
2458
2459            if (nc->num > hard_vcpus_limit) {
2460                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
2461                        "the maximum number of cpus supported by KVM (%d)\n",
2462                        nc->name, nc->num, hard_vcpus_limit);
2463                exit(1);
2464            }
2465        }
2466        nc++;
2467    }
2468
2469    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
2470    if (!missing_cap) {
2471        missing_cap =
2472            kvm_check_extension_list(s, kvm_arch_required_capabilities);
2473    }
2474    if (missing_cap) {
2475        ret = -EINVAL;
2476        fprintf(stderr, "kvm does not support %s\n%s",
2477                missing_cap->name, upgrade_note);
2478        goto err;
2479    }
2480
2481    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
2482    s->coalesced_pio = s->coalesced_mmio &&
2483                       kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
2484
2485    /*
2486     * Enable KVM dirty ring if supported, otherwise fall back to
2487     * dirty logging mode
2488     */
2489    if (s->kvm_dirty_ring_size > 0) {
2490        uint64_t ring_bytes;
2491
2492        ring_bytes = s->kvm_dirty_ring_size * sizeof(struct kvm_dirty_gfn);
2493
2494        /* Read the max supported pages */
2495        ret = kvm_vm_check_extension(s, KVM_CAP_DIRTY_LOG_RING);
2496        if (ret > 0) {
2497            if (ring_bytes > ret) {
2498                error_report("KVM dirty ring size %" PRIu32 " too big "
2499                             "(maximum is %ld).  Please use a smaller value.",
2500                             s->kvm_dirty_ring_size,
2501                             (long)ret / sizeof(struct kvm_dirty_gfn));
2502                ret = -EINVAL;
2503                goto err;
2504            }
2505
2506            ret = kvm_vm_enable_cap(s, KVM_CAP_DIRTY_LOG_RING, 0, ring_bytes);
2507            if (ret) {
2508                error_report("Enabling of KVM dirty ring failed: %s. "
2509                             "Suggested minimum value is 1024.", strerror(-ret));
2510                goto err;
2511            }
2512
2513            s->kvm_dirty_ring_bytes = ring_bytes;
2514        } else {
2515            warn_report("KVM dirty ring not available, using bitmap method");
2516            s->kvm_dirty_ring_size = 0;
2517        }
2518    }
2519
2520    /*
2521     * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is
2522     * enabled.  More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no
2523     * page is wr-protected initially, which conflicts with how the kvm dirty
2524     * ring is used - it requires all pages to be wr-protected at the very
2525     * beginning.  Enabling this feature for dirty ring causes data corruption.
2526     *
2527     * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log,
2528     * we may expect a higher stall time when starting the migration.  In the
2529     * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too:
2530     * instead of clearing the dirty bit, it can be a way to explicitly
2531     * wr-protect guest pages.
2532     */
2533    if (!s->kvm_dirty_ring_size) {
2534        dirty_log_manual_caps =
2535            kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
2536        dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
2537                                  KVM_DIRTY_LOG_INITIALLY_SET);
2538        s->manual_dirty_log_protect = dirty_log_manual_caps;
2539        if (dirty_log_manual_caps) {
2540            ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
2541                                    dirty_log_manual_caps);
2542            if (ret) {
2543                warn_report("Trying to enable capability %"PRIu64" of "
2544                            "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
2545                            "Falling back to the legacy mode. ",
2546                            dirty_log_manual_caps);
2547                s->manual_dirty_log_protect = 0;
2548            }
2549        }
2550    }
2551
2552#ifdef KVM_CAP_VCPU_EVENTS
2553    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
2554#endif
2555
2556    s->robust_singlestep =
2557        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
2558
2559#ifdef KVM_CAP_DEBUGREGS
2560    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
2561#endif
2562
2563    s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
2564
2565#ifdef KVM_CAP_IRQ_ROUTING
2566    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
2567#endif
2568
2569    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
2570
2571    s->irq_set_ioctl = KVM_IRQ_LINE;
2572    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
2573        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
2574    }
2575
2576    kvm_readonly_mem_allowed =
2577        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
2578
2579    kvm_eventfds_allowed =
2580        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
2581
2582    kvm_irqfds_allowed =
2583        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
2584
2585    kvm_resamplefds_allowed =
2586        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
2587
2588    kvm_vm_attributes_allowed =
2589        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
2590
2591    kvm_ioeventfd_any_length_allowed =
2592        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
2593
2594#ifdef KVM_CAP_SET_GUEST_DEBUG
2595    kvm_has_guest_debug =
2596        (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0);
2597#endif
2598
2599    kvm_sstep_flags = 0;
2600    if (kvm_has_guest_debug) {
2601        kvm_sstep_flags = SSTEP_ENABLE;
2602
2603#if defined KVM_CAP_SET_GUEST_DEBUG2
2604        int guest_debug_flags =
2605            kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2);
2606
2607        if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) {
2608            kvm_sstep_flags |= SSTEP_NOIRQ;
2609        }
2610#endif
2611    }
2612
2613    kvm_state = s;
2614
2615    ret = kvm_arch_init(ms, s);
2616    if (ret < 0) {
2617        goto err;
2618    }
2619
2620    if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
2621        s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
2622    }
2623
2624    qemu_register_reset(kvm_unpoison_all, NULL);
2625
2626    if (s->kernel_irqchip_allowed) {
2627        kvm_irqchip_create(s);
2628    }
2629
2630    if (kvm_eventfds_allowed) {
2631        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
2632        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
2633    }
2634    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
2635    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;
2636
2637    kvm_memory_listener_register(s, &s->memory_listener,
2638                                 &address_space_memory, 0, "kvm-memory");
2639    if (kvm_eventfds_allowed) {
2640        memory_listener_register(&kvm_io_listener,
2641                                 &address_space_io);
2642    }
2643    memory_listener_register(&kvm_coalesced_pio_listener,
2644                             &address_space_io);
2645
2646    s->many_ioeventfds = kvm_check_many_ioeventfds();
2647
2648    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2649    if (!s->sync_mmu) {
2650        ret = ram_block_discard_disable(true);
2651        assert(!ret);
2652    }
2653
2654    if (s->kvm_dirty_ring_size) {
2655        ret = kvm_dirty_ring_reaper_init(s);
2656        if (ret) {
2657            goto err;
2658        }
2659    }
2660
2661    if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) {
2662        add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb,
2663                            query_stats_schemas_cb);
2664    }
2665
2666    return 0;
2667
2668err:
2669    assert(ret < 0);
2670    if (s->vmfd >= 0) {
2671        close(s->vmfd);
2672    }
2673    if (s->fd != -1) {
2674        close(s->fd);
2675    }
2676    g_free(s->memory_listener.slots);
2677
2678    return ret;
2679}
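
/*
 * Configuration sketch: most of the probing in kvm_init() is steered by
 * accelerator properties given on the command line, e.g. (illustrative
 * invocation, any machine/target works the same way):
 *
 *     qemu-system-x86_64 -accel kvm,dirty-ring-size=4096 ...
 *
 * A non-zero dirty-ring-size (a power of two, see the property setter near
 * the end of this file) selects the dirty ring path probed above; leaving it
 * at 0 keeps the bitmap-based dirty logging path.
 */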
2680
2681void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
2682{
2683    s->sigmask_len = sigmask_len;
2684}
2685
2686static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
2687                          int size, uint32_t count)
2688{
2689    int i;
2690    uint8_t *ptr = data;
2691
2692    for (i = 0; i < count; i++) {
2693        address_space_rw(&address_space_io, port, attrs,
2694                         ptr, size,
2695                         direction == KVM_EXIT_IO_OUT);
2696        ptr += size;
2697    }
2698}
2699
2700static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
2701{
2702    fprintf(stderr, "KVM internal error. Suberror: %d\n",
2703            run->internal.suberror);
2704
2705    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
2706        int i;
2707
2708        for (i = 0; i < run->internal.ndata; ++i) {
2709            fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n",
2710                    i, (uint64_t)run->internal.data[i]);
2711        }
2712    }
2713    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
2714        fprintf(stderr, "emulation failure\n");
2715        if (!kvm_arch_stop_on_emulation_error(cpu)) {
2716            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
2717            return EXCP_INTERRUPT;
2718        }
2719    }
2720    /* FIXME: Should trigger a qmp message to let management know
2721     * something went wrong.
2722     */
2723    return -1;
2724}
2725
2726void kvm_flush_coalesced_mmio_buffer(void)
2727{
2728    KVMState *s = kvm_state;
2729
2730    if (s->coalesced_flush_in_progress) {
2731        return;
2732    }
2733
2734    s->coalesced_flush_in_progress = true;
2735
2736    if (s->coalesced_mmio_ring) {
2737        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
2738        while (ring->first != ring->last) {
2739            struct kvm_coalesced_mmio *ent;
2740
2741            ent = &ring->coalesced_mmio[ring->first];
2742
2743            if (ent->pio == 1) {
2744                address_space_write(&address_space_io, ent->phys_addr,
2745                                    MEMTXATTRS_UNSPECIFIED, ent->data,
2746                                    ent->len);
2747            } else {
2748                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
2749            }
2750            smp_wmb();
2751            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
2752        }
2753    }
2754
2755    s->coalesced_flush_in_progress = false;
2756}
2757
2758bool kvm_cpu_check_are_resettable(void)
2759{
2760    return kvm_arch_cpu_check_are_resettable();
2761}
2762
2763static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
2764{
2765    if (!cpu->vcpu_dirty) {
2766        kvm_arch_get_registers(cpu);
2767        cpu->vcpu_dirty = true;
2768    }
2769}
2770
2771void kvm_cpu_synchronize_state(CPUState *cpu)
2772{
2773    if (!cpu->vcpu_dirty) {
2774        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
2775    }
2776}
2777
2778static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
2779{
2780    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
2781    cpu->vcpu_dirty = false;
2782}
2783
2784void kvm_cpu_synchronize_post_reset(CPUState *cpu)
2785{
2786    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
2787}
2788
2789static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
2790{
2791    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
2792    cpu->vcpu_dirty = false;
2793}
2794
2795void kvm_cpu_synchronize_post_init(CPUState *cpu)
2796{
2797    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
2798}
2799
2800static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
2801{
2802    cpu->vcpu_dirty = true;
2803}
2804
2805void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
2806{
2807    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
2808}
2809
2810#ifdef KVM_HAVE_MCE_INJECTION
2811static __thread void *pending_sigbus_addr;
2812static __thread int pending_sigbus_code;
2813static __thread bool have_sigbus_pending;
2814#endif
2815
2816static void kvm_cpu_kick(CPUState *cpu)
2817{
2818    qatomic_set(&cpu->kvm_run->immediate_exit, 1);
2819}
2820
2821static void kvm_cpu_kick_self(void)
2822{
2823    if (kvm_immediate_exit) {
2824        kvm_cpu_kick(current_cpu);
2825    } else {
2826        qemu_cpu_kick_self();
2827    }
2828}
2829
2830static void kvm_eat_signals(CPUState *cpu)
2831{
2832    struct timespec ts = { 0, 0 };
2833    siginfo_t siginfo;
2834    sigset_t waitset;
2835    sigset_t chkset;
2836    int r;
2837
2838    if (kvm_immediate_exit) {
2839        qatomic_set(&cpu->kvm_run->immediate_exit, 0);
2840        /* Write kvm_run->immediate_exit before the cpu->exit_request
2841         * write in kvm_cpu_exec.
2842         */
2843        smp_wmb();
2844        return;
2845    }
2846
2847    sigemptyset(&waitset);
2848    sigaddset(&waitset, SIG_IPI);
2849
2850    do {
2851        r = sigtimedwait(&waitset, &siginfo, &ts);
2852        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
2853            perror("sigtimedwait");
2854            exit(1);
2855        }
2856
2857        r = sigpending(&chkset);
2858        if (r == -1) {
2859            perror("sigpending");
2860            exit(1);
2861        }
2862    } while (sigismember(&chkset, SIG_IPI));
2863}
2864
2865int kvm_cpu_exec(CPUState *cpu)
2866{
2867    struct kvm_run *run = cpu->kvm_run;
2868    int ret, run_ret;
2869
2870    DPRINTF("kvm_cpu_exec()\n");
2871
2872    if (kvm_arch_process_async_events(cpu)) {
2873        qatomic_set(&cpu->exit_request, 0);
2874        return EXCP_HLT;
2875    }
2876
2877    qemu_mutex_unlock_iothread();
2878    cpu_exec_start(cpu);
2879
2880    do {
2881        MemTxAttrs attrs;
2882
2883        if (cpu->vcpu_dirty) {
2884            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
2885            cpu->vcpu_dirty = false;
2886        }
2887
2888        kvm_arch_pre_run(cpu, run);
2889        if (qatomic_read(&cpu->exit_request)) {
2890            DPRINTF("interrupt exit requested\n");
2891            /*
2892             * KVM requires us to reenter the kernel after IO exits to complete
2893             * instruction emulation. This self-signal will ensure that we
2894             * leave ASAP again.
2895             */
2896            kvm_cpu_kick_self();
2897        }
2898
2899        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
2900         * Matching barrier in kvm_eat_signals.
2901         */
2902        smp_rmb();
2903
2904        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
2905
2906        attrs = kvm_arch_post_run(cpu, run);
2907
2908#ifdef KVM_HAVE_MCE_INJECTION
2909        if (unlikely(have_sigbus_pending)) {
2910            qemu_mutex_lock_iothread();
2911            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
2912                                    pending_sigbus_addr);
2913            have_sigbus_pending = false;
2914            qemu_mutex_unlock_iothread();
2915        }
2916#endif
2917
2918        if (run_ret < 0) {
2919            if (run_ret == -EINTR || run_ret == -EAGAIN) {
2920                DPRINTF("io window exit\n");
2921                kvm_eat_signals(cpu);
2922                ret = EXCP_INTERRUPT;
2923                break;
2924            }
2925            fprintf(stderr, "error: kvm run failed %s\n",
2926                    strerror(-run_ret));
2927#ifdef TARGET_PPC
2928            if (run_ret == -EBUSY) {
2929                fprintf(stderr,
2930                        "This is probably because your SMT is enabled.\n"
2931                        "VCPU can only run on primary threads with all "
2932                        "secondary threads offline.\n");
2933            }
2934#endif
2935            ret = -1;
2936            break;
2937        }
2938
2939        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
2940        switch (run->exit_reason) {
2941        case KVM_EXIT_IO:
2942            DPRINTF("handle_io\n");
2943            /* Called outside BQL */
2944            kvm_handle_io(run->io.port, attrs,
2945                          (uint8_t *)run + run->io.data_offset,
2946                          run->io.direction,
2947                          run->io.size,
2948                          run->io.count);
2949            ret = 0;
2950            break;
2951        case KVM_EXIT_MMIO:
2952            DPRINTF("handle_mmio\n");
2953            /* Called outside BQL */
2954            address_space_rw(&address_space_memory,
2955                             run->mmio.phys_addr, attrs,
2956                             run->mmio.data,
2957                             run->mmio.len,
2958                             run->mmio.is_write);
2959            ret = 0;
2960            break;
2961        case KVM_EXIT_IRQ_WINDOW_OPEN:
2962            DPRINTF("irq_window_open\n");
2963            ret = EXCP_INTERRUPT;
2964            break;
2965        case KVM_EXIT_SHUTDOWN:
2966            DPRINTF("shutdown\n");
2967            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
2968            ret = EXCP_INTERRUPT;
2969            break;
2970        case KVM_EXIT_UNKNOWN:
2971            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
2972                    (uint64_t)run->hw.hardware_exit_reason);
2973            ret = -1;
2974            break;
2975        case KVM_EXIT_INTERNAL_ERROR:
2976            ret = kvm_handle_internal_error(cpu, run);
2977            break;
2978        case KVM_EXIT_DIRTY_RING_FULL:
2979            /*
2980             * We shouldn't continue if the dirty ring of this vcpu is
2981             * still full.  Got kicked by KVM_RESET_DIRTY_RINGS.
2982             */
2983            trace_kvm_dirty_ring_full(cpu->cpu_index);
2984            qemu_mutex_lock_iothread();
2985            /*
2986             * We throttle the vCPU by making it sleep once it exits the
2987             * kernel due to a full dirty ring. In the dirtylimit scenario,
2988             * reaping all vCPUs after a single vCPU's dirty ring gets full
2989             * would skip that sleep, so only reap the vCPU whose ring is full.
2990             */
2991            if (dirtylimit_in_service()) {
2992                kvm_dirty_ring_reap(kvm_state, cpu);
2993            } else {
2994                kvm_dirty_ring_reap(kvm_state, NULL);
2995            }
2996            qemu_mutex_unlock_iothread();
2997            dirtylimit_vcpu_execute(cpu);
2998            ret = 0;
2999            break;
3000        case KVM_EXIT_SYSTEM_EVENT:
3001            switch (run->system_event.type) {
3002            case KVM_SYSTEM_EVENT_SHUTDOWN:
3003                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
3004                ret = EXCP_INTERRUPT;
3005                break;
3006            case KVM_SYSTEM_EVENT_RESET:
3007                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
3008                ret = EXCP_INTERRUPT;
3009                break;
3010            case KVM_SYSTEM_EVENT_CRASH:
3011                kvm_cpu_synchronize_state(cpu);
3012                qemu_mutex_lock_iothread();
3013                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
3014                qemu_mutex_unlock_iothread();
3015                ret = 0;
3016                break;
3017            default:
3018                DPRINTF("kvm_arch_handle_exit\n");
3019                ret = kvm_arch_handle_exit(cpu, run);
3020                break;
3021            }
3022            break;
3023        default:
3024            DPRINTF("kvm_arch_handle_exit\n");
3025            ret = kvm_arch_handle_exit(cpu, run);
3026            break;
3027        }
3028    } while (ret == 0);
3029
3030    cpu_exec_end(cpu);
3031    qemu_mutex_lock_iothread();
3032
3033    if (ret < 0) {
3034        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
3035        vm_stop(RUN_STATE_INTERNAL_ERROR);
3036    }
3037
3038    qatomic_set(&cpu->exit_request, 0);
3039    return ret;
3040}
3041
3042int kvm_ioctl(KVMState *s, int type, ...)
3043{
3044    int ret;
3045    void *arg;
3046    va_list ap;
3047
3048    va_start(ap, type);
3049    arg = va_arg(ap, void *);
3050    va_end(ap);
3051
3052    trace_kvm_ioctl(type, arg);
3053    ret = ioctl(s->fd, type, arg);
3054    if (ret == -1) {
3055        ret = -errno;
3056    }
3057    return ret;
3058}
3059
3060int kvm_vm_ioctl(KVMState *s, int type, ...)
3061{
3062    int ret;
3063    void *arg;
3064    va_list ap;
3065
3066    va_start(ap, type);
3067    arg = va_arg(ap, void *);
3068    va_end(ap);
3069
3070    trace_kvm_vm_ioctl(type, arg);
3071    ret = ioctl(s->vmfd, type, arg);
3072    if (ret == -1) {
3073        ret = -errno;
3074    }
3075    return ret;
3076}
3077
3078int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
3079{
3080    int ret;
3081    void *arg;
3082    va_list ap;
3083
3084    va_start(ap, type);
3085    arg = va_arg(ap, void *);
3086    va_end(ap);
3087
3088    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
3089    ret = ioctl(cpu->kvm_fd, type, arg);
3090    if (ret == -1) {
3091        ret = -errno;
3092    }
3093    return ret;
3094}
3095
3096int kvm_device_ioctl(int fd, int type, ...)
3097{
3098    int ret;
3099    void *arg;
3100    va_list ap;
3101
3102    va_start(ap, type);
3103    arg = va_arg(ap, void *);
3104    va_end(ap);
3105
3106    trace_kvm_device_ioctl(fd, type, arg);
3107    ret = ioctl(fd, type, arg);
3108    if (ret == -1) {
3109        ret = -errno;
3110    }
3111    return ret;
3112}
3113
3114int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
3115{
3116    int ret;
3117    struct kvm_device_attr attribute = {
3118        .group = group,
3119        .attr = attr,
3120    };
3121
3122    if (!kvm_vm_attributes_allowed) {
3123        return 0;
3124    }
3125
3126    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
3127    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
3128    return ret ? 0 : 1;
3129}
3130
3131int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
3132{
3133    struct kvm_device_attr attribute = {
3134        .group = group,
3135        .attr = attr,
3136        .flags = 0,
3137    };
3138
3139    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
3140}
3141
3142int kvm_device_access(int fd, int group, uint64_t attr,
3143                      void *val, bool write, Error **errp)
3144{
3145    struct kvm_device_attr kvmattr;
3146    int err;
3147
3148    kvmattr.flags = 0;
3149    kvmattr.group = group;
3150    kvmattr.attr = attr;
3151    kvmattr.addr = (uintptr_t)val;
3152
3153    err = kvm_device_ioctl(fd,
3154                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
3155                           &kvmattr);
3156    if (err < 0) {
3157        error_setg_errno(errp, -err,
3158                         "KVM_%s_DEVICE_ATTR failed: Group %d "
3159                         "attr 0x%016" PRIx64,
3160                         write ? "SET" : "GET", group, attr);
3161    }
3162    return err;
3163}
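
/*
 * Usage sketch: kvm_create_device() and kvm_device_access() are the usual
 * pair for driving an in-kernel device.  The VFIO constants below are only
 * an assumed example (they come from linux/kvm.h) and "group_fd" stands in
 * for a real VFIO group file descriptor:
 *
 *     int dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_VFIO, false);
 *
 *     if (dev_fd >= 0) {
 *         kvm_device_access(dev_fd, KVM_DEV_VFIO_GROUP, KVM_DEV_VFIO_GROUP_ADD,
 *                           &group_fd, true, NULL);
 *     }
 */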
3164
3165bool kvm_has_sync_mmu(void)
3166{
3167    return kvm_state->sync_mmu;
3168}
3169
3170int kvm_has_vcpu_events(void)
3171{
3172    return kvm_state->vcpu_events;
3173}
3174
3175int kvm_has_robust_singlestep(void)
3176{
3177    return kvm_state->robust_singlestep;
3178}
3179
3180int kvm_has_debugregs(void)
3181{
3182    return kvm_state->debugregs;
3183}
3184
3185int kvm_max_nested_state_length(void)
3186{
3187    return kvm_state->max_nested_state_len;
3188}
3189
3190int kvm_has_many_ioeventfds(void)
3191{
3192    if (!kvm_enabled()) {
3193        return 0;
3194    }
3195    return kvm_state->many_ioeventfds;
3196}
3197
3198int kvm_has_gsi_routing(void)
3199{
3200#ifdef KVM_CAP_IRQ_ROUTING
3201    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
3202#else
3203    return false;
3204#endif
3205}
3206
3207int kvm_has_intx_set_mask(void)
3208{
3209    return kvm_state->intx_set_mask;
3210}
3211
3212bool kvm_arm_supports_user_irq(void)
3213{
3214    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
3215}
3216
3217#ifdef KVM_CAP_SET_GUEST_DEBUG
3218struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
3219                                                 target_ulong pc)
3220{
3221    struct kvm_sw_breakpoint *bp;
3222
3223    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
3224        if (bp->pc == pc) {
3225            return bp;
3226        }
3227    }
3228    return NULL;
3229}
3230
3231int kvm_sw_breakpoints_active(CPUState *cpu)
3232{
3233    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
3234}
3235
3236struct kvm_set_guest_debug_data {
3237    struct kvm_guest_debug dbg;
3238    int err;
3239};
3240
3241static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
3242{
3243    struct kvm_set_guest_debug_data *dbg_data =
3244        (struct kvm_set_guest_debug_data *) data.host_ptr;
3245
3246    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
3247                                   &dbg_data->dbg);
3248}
3249
3250int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3251{
3252    struct kvm_set_guest_debug_data data;
3253
3254    data.dbg.control = reinject_trap;
3255
3256    if (cpu->singlestep_enabled) {
3257        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
3258
3259        if (cpu->singlestep_enabled & SSTEP_NOIRQ) {
3260            data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ;
3261        }
3262    }
3263    kvm_arch_update_guest_debug(cpu, &data.dbg);
3264
3265    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
3266               RUN_ON_CPU_HOST_PTR(&data));
3267    return data.err;
3268}
3269
3270int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
3271                          target_ulong len, int type)
3272{
3273    struct kvm_sw_breakpoint *bp;
3274    int err;
3275
3276    if (type == GDB_BREAKPOINT_SW) {
3277        bp = kvm_find_sw_breakpoint(cpu, addr);
3278        if (bp) {
3279            bp->use_count++;
3280            return 0;
3281        }
3282
3283        bp = g_new(struct kvm_sw_breakpoint, 1);
3284        bp->pc = addr;
3285        bp->use_count = 1;
3286        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
3287        if (err) {
3288            g_free(bp);
3289            return err;
3290        }
3291
3292        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3293    } else {
3294        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
3295        if (err) {
3296            return err;
3297        }
3298    }
3299
3300    CPU_FOREACH(cpu) {
3301        err = kvm_update_guest_debug(cpu, 0);
3302        if (err) {
3303            return err;
3304        }
3305    }
3306    return 0;
3307}
3308
3309int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
3310                          target_ulong len, int type)
3311{
3312    struct kvm_sw_breakpoint *bp;
3313    int err;
3314
3315    if (type == GDB_BREAKPOINT_SW) {
3316        bp = kvm_find_sw_breakpoint(cpu, addr);
3317        if (!bp) {
3318            return -ENOENT;
3319        }
3320
3321        if (bp->use_count > 1) {
3322            bp->use_count--;
3323            return 0;
3324        }
3325
3326        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
3327        if (err) {
3328            return err;
3329        }
3330
3331        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
3332        g_free(bp);
3333    } else {
3334        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
3335        if (err) {
3336            return err;
3337        }
3338    }
3339
3340    CPU_FOREACH(cpu) {
3341        err = kvm_update_guest_debug(cpu, 0);
3342        if (err) {
3343            return err;
3344        }
3345    }
3346    return 0;
3347}
3348
3349void kvm_remove_all_breakpoints(CPUState *cpu)
3350{
3351    struct kvm_sw_breakpoint *bp, *next;
3352    KVMState *s = cpu->kvm_state;
3353    CPUState *tmpcpu;
3354
3355    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
3356        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
3357            /* Try harder to find a CPU that currently sees the breakpoint. */
3358            CPU_FOREACH(tmpcpu) {
3359                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
3360                    break;
3361                }
3362            }
3363        }
3364        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
3365        g_free(bp);
3366    }
3367    kvm_arch_remove_all_hw_breakpoints();
3368
3369    CPU_FOREACH(cpu) {
3370        kvm_update_guest_debug(cpu, 0);
3371    }
3372}
3373
3374#else /* !KVM_CAP_SET_GUEST_DEBUG */
3375
3376int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
3377{
3378    return -EINVAL;
3379}
3380
3381int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
3382                          target_ulong len, int type)
3383{
3384    return -EINVAL;
3385}
3386
3387int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
3388                          target_ulong len, int type)
3389{
3390    return -EINVAL;
3391}
3392
3393void kvm_remove_all_breakpoints(CPUState *cpu)
3394{
3395}
3396#endif /* !KVM_CAP_SET_GUEST_DEBUG */
3397
3398static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
3399{
3400    KVMState *s = kvm_state;
3401    struct kvm_signal_mask *sigmask;
3402    int r;
3403
3404    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
3405
3406    sigmask->len = s->sigmask_len;
3407    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
3408    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
3409    g_free(sigmask);
3410
3411    return r;
3412}
3413
3414static void kvm_ipi_signal(int sig)
3415{
3416    if (current_cpu) {
3417        assert(kvm_immediate_exit);
3418        kvm_cpu_kick(current_cpu);
3419    }
3420}
3421
3422void kvm_init_cpu_signals(CPUState *cpu)
3423{
3424    int r;
3425    sigset_t set;
3426    struct sigaction sigact;
3427
3428    memset(&sigact, 0, sizeof(sigact));
3429    sigact.sa_handler = kvm_ipi_signal;
3430    sigaction(SIG_IPI, &sigact, NULL);
3431
3432    pthread_sigmask(SIG_BLOCK, NULL, &set);
3433#if defined KVM_HAVE_MCE_INJECTION
3434    sigdelset(&set, SIGBUS);
3435    pthread_sigmask(SIG_SETMASK, &set, NULL);
3436#endif
3437    sigdelset(&set, SIG_IPI);
3438    if (kvm_immediate_exit) {
3439        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
3440    } else {
3441        r = kvm_set_signal_mask(cpu, &set);
3442    }
3443    if (r) {
3444        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
3445        exit(1);
3446    }
3447}
3448
3449/* Called asynchronously in VCPU thread.  */
3450int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
3451{
3452#ifdef KVM_HAVE_MCE_INJECTION
3453    if (have_sigbus_pending) {
3454        return 1;
3455    }
3456    have_sigbus_pending = true;
3457    pending_sigbus_addr = addr;
3458    pending_sigbus_code = code;
3459    qatomic_set(&cpu->exit_request, 1);
3460    return 0;
3461#else
3462    return 1;
3463#endif
3464}
3465
3466/* Called synchronously (via signalfd) in main thread.  */
3467int kvm_on_sigbus(int code, void *addr)
3468{
3469#ifdef KVM_HAVE_MCE_INJECTION
3470    /* Action required MCE kills the process if SIGBUS is blocked.  Because
3471     * that's what happens in the I/O thread, where we handle MCE via signalfd,
3472     * we can only get action optional here.
3473     */
3474    assert(code != BUS_MCEERR_AR);
3475    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
3476    return 0;
3477#else
3478    return 1;
3479#endif
3480}
3481
3482int kvm_create_device(KVMState *s, uint64_t type, bool test)
3483{
3484    int ret;
3485    struct kvm_create_device create_dev;
3486
3487    create_dev.type = type;
3488    create_dev.fd = -1;
3489    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
3490
3491    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
3492        return -ENOTSUP;
3493    }
3494
3495    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
3496    if (ret) {
3497        return ret;
3498    }
3499
3500    return test ? 0 : create_dev.fd;
3501}
3502
3503bool kvm_device_supported(int vmfd, uint64_t type)
3504{
3505    struct kvm_create_device create_dev = {
3506        .type = type,
3507        .fd = -1,
3508        .flags = KVM_CREATE_DEVICE_TEST,
3509    };
3510
3511    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
3512        return false;
3513    }
3514
3515    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
3516}
3517
3518int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
3519{
3520    struct kvm_one_reg reg;
3521    int r;
3522
3523    reg.id = id;
3524    reg.addr = (uintptr_t) source;
3525    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
3526    if (r) {
3527        trace_kvm_failed_reg_set(id, strerror(-r));
3528    }
3529    return r;
3530}
3531
3532int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
3533{
3534    struct kvm_one_reg reg;
3535    int r;
3536
3537    reg.id = id;
3538    reg.addr = (uintptr_t) target;
3539    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
3540    if (r) {
3541        trace_kvm_failed_reg_get(id, strerror(-r));
3542    }
3543    return r;
3544}
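
/*
 * Usage sketch: arch code usually wraps the ONE_REG helpers above in a
 * read-modify-write pattern; "reg_id" stands for an architecture-specific
 * KVM_REG_* identifier and SOME_FEATURE_BIT is a placeholder:
 *
 *     uint64_t val;
 *
 *     if (kvm_get_one_reg(cs, reg_id, &val) == 0) {
 *         val |= SOME_FEATURE_BIT;
 *         kvm_set_one_reg(cs, reg_id, &val);
 *     }
 */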
3545
3546static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
3547                                 hwaddr start_addr, hwaddr size)
3548{
3549    KVMState *kvm = KVM_STATE(ms->accelerator);
3550    int i;
3551
3552    for (i = 0; i < kvm->nr_as; ++i) {
3553        if (kvm->as[i].as == as && kvm->as[i].ml) {
3554            size = MIN(kvm_max_slot_size, size);
3555            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
3556                                                    start_addr, size);
3557        }
3558    }
3559
3560    return false;
3561}
3562
3563static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
3564                                   const char *name, void *opaque,
3565                                   Error **errp)
3566{
3567    KVMState *s = KVM_STATE(obj);
3568    int64_t value = s->kvm_shadow_mem;
3569
3570    visit_type_int(v, name, &value, errp);
3571}
3572
3573static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
3574                                   const char *name, void *opaque,
3575                                   Error **errp)
3576{
3577    KVMState *s = KVM_STATE(obj);
3578    int64_t value;
3579
3580    if (s->fd != -1) {
3581        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3582        return;
3583    }
3584
3585    if (!visit_type_int(v, name, &value, errp)) {
3586        return;
3587    }
3588
3589    s->kvm_shadow_mem = value;
3590}
3591
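/*
 * Setter for the "kernel-irqchip" property: maps on/off/split onto the
 * allowed/required/split flags reported by the kvm_kernel_irqchip_*()
 * helpers below.
 */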
3592static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
3593                                   const char *name, void *opaque,
3594                                   Error **errp)
3595{
3596    KVMState *s = KVM_STATE(obj);
3597    OnOffSplit mode;
3598
3599    if (s->fd != -1) {
3600        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3601        return;
3602    }
3603
3604    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
3605        return;
3606    }
3607    switch (mode) {
3608    case ON_OFF_SPLIT_ON:
3609        s->kernel_irqchip_allowed = true;
3610        s->kernel_irqchip_required = true;
3611        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3612        break;
3613    case ON_OFF_SPLIT_OFF:
3614        s->kernel_irqchip_allowed = false;
3615        s->kernel_irqchip_required = false;
3616        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
3617        break;
3618    case ON_OFF_SPLIT_SPLIT:
3619        s->kernel_irqchip_allowed = true;
3620        s->kernel_irqchip_required = true;
3621        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
3622        break;
3623    default:
3624        /* The value was checked in visit_type_OnOffSplit() above. If
3625         * we get here, then something is wrong in QEMU.
3626         */
3627        abort();
3628    }
3629}
3630
3631bool kvm_kernel_irqchip_allowed(void)
3632{
3633    return kvm_state->kernel_irqchip_allowed;
3634}
3635
3636bool kvm_kernel_irqchip_required(void)
3637{
3638    return kvm_state->kernel_irqchip_required;
3639}
3640
3641bool kvm_kernel_irqchip_split(void)
3642{
3643    return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
3644}
3645
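/*
 * QOM accessors for the "dirty-ring-size" property.  The value must be a
 * power of two; 0 (the default) keeps dirty tracking on the bitmap path.
 */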
3646static void kvm_get_dirty_ring_size(Object *obj, Visitor *v,
3647                                    const char *name, void *opaque,
3648                                    Error **errp)
3649{
3650    KVMState *s = KVM_STATE(obj);
3651    uint32_t value = s->kvm_dirty_ring_size;
3652
3653    visit_type_uint32(v, name, &value, errp);
3654}
3655
3656static void kvm_set_dirty_ring_size(Object *obj, Visitor *v,
3657                                    const char *name, void *opaque,
3658                                    Error **errp)
3659{
3660    KVMState *s = KVM_STATE(obj);
3662    uint32_t value;
3663
3664    if (s->fd != -1) {
3665        error_setg(errp, "Cannot set properties after the accelerator has been initialized");
3666        return;
3667    }
3668
3669    if (!visit_type_uint32(v, name, &value, errp)) {
3670        return;
3671    }
3674    if (value & (value - 1)) {
3675        error_setg(errp, "dirty-ring-size must be a power of two.");
3676        return;
3677    }
3678
3679    s->kvm_dirty_ring_size = value;
3680}
3681
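/* Establish the accelerator's property defaults when the QOM instance is created. */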
3682static void kvm_accel_instance_init(Object *obj)
3683{
3684    KVMState *s = KVM_STATE(obj);
3685
3686    s->fd = -1;
3687    s->vmfd = -1;
3688    s->kvm_shadow_mem = -1;
3689    s->kernel_irqchip_allowed = true;
3690    s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
3691    /* KVM dirty ring is by default off */
3692    s->kvm_dirty_ring_size = 0;
3693}
3694
3695static void kvm_accel_class_init(ObjectClass *oc, void *data)
3696{
3697    AccelClass *ac = ACCEL_CLASS(oc);
3698    ac->name = "KVM";
3699    ac->init_machine = kvm_init;
3700    ac->has_memory = kvm_accel_has_memory;
3701    ac->allowed = &kvm_allowed;
3702
3703    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
3704        NULL, kvm_set_kernel_irqchip,
3705        NULL, NULL);
3706    object_class_property_set_description(oc, "kernel-irqchip",
3707        "Configure KVM in-kernel irqchip");
3708
3709    object_class_property_add(oc, "kvm-shadow-mem", "int",
3710        kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
3711        NULL, NULL);
3712    object_class_property_set_description(oc, "kvm-shadow-mem",
3713        "KVM shadow MMU size");
3714
3715    object_class_property_add(oc, "dirty-ring-size", "uint32",
3716        kvm_get_dirty_ring_size, kvm_set_dirty_ring_size,
3717        NULL, NULL);
3718    object_class_property_set_description(oc, "dirty-ring-size",
3719        "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)");
3720}
3721
3722static const TypeInfo kvm_accel_type = {
3723    .name = TYPE_KVM_ACCEL,
3724    .parent = TYPE_ACCEL,
3725    .instance_init = kvm_accel_instance_init,
3726    .class_init = kvm_accel_class_init,
3727    .instance_size = sizeof(KVMState),
3728};
3729
3730static void kvm_type_init(void)
3731{
3732    type_register_static(&kvm_accel_type);
3733}
3734
3735type_init(kvm_type_init);
3736
3737typedef struct StatsArgs {
3738    union StatsResultsType {
3739        StatsResultList **stats;
3740        StatsSchemaList **schema;
3741    } result;
3742    strList *names;
3743    Error **errp;
3744} StatsArgs;
3745
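/*
 * Convert one binary stats descriptor plus its data into a Stats QAPI
 * entry and prepend it to @stats_list.  Descriptors with an unknown type,
 * unit or base are silently skipped.
 */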
3746static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc,
3747                                    uint64_t *stats_data,
3748                                    StatsList *stats_list,
3749                                    Error **errp)
3750{
3752    Stats *stats;
3753    uint64List *val_list = NULL;
3754
3755    /* Only add stats that we understand.  */
3756    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
3757    case KVM_STATS_TYPE_CUMULATIVE:
3758    case KVM_STATS_TYPE_INSTANT:
3759    case KVM_STATS_TYPE_PEAK:
3760    case KVM_STATS_TYPE_LINEAR_HIST:
3761    case KVM_STATS_TYPE_LOG_HIST:
3762        break;
3763    default:
3764        return stats_list;
3765    }
3766
3767    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
3768    case KVM_STATS_UNIT_NONE:
3769    case KVM_STATS_UNIT_BYTES:
3770    case KVM_STATS_UNIT_CYCLES:
3771    case KVM_STATS_UNIT_SECONDS:
3772    case KVM_STATS_UNIT_BOOLEAN:
3773        break;
3774    default:
3775        return stats_list;
3776    }
3777
3778    switch (pdesc->flags & KVM_STATS_BASE_MASK) {
3779    case KVM_STATS_BASE_POW10:
3780    case KVM_STATS_BASE_POW2:
3781        break;
3782    default:
3783        return stats_list;
3784    }
3785
3786    /* Alloc and populate data list */
3787    stats = g_new0(Stats, 1);
3788    stats->name = g_strdup(pdesc->name);
3789    stats->value = g_new0(StatsValue, 1);
3790
3791    if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) {
3792        stats->value->u.boolean = *stats_data;
3793        stats->value->type = QTYPE_QBOOL;
3794    } else if (pdesc->size == 1) {
3795        stats->value->u.scalar = *stats_data;
3796        stats->value->type = QTYPE_QNUM;
3797    } else {
3798        int i;
3799        for (i = 0; i < pdesc->size; i++) {
3800            QAPI_LIST_PREPEND(val_list, stats_data[i]);
3801        }
3802        stats->value->u.list = val_list;
3803        stats->value->type = QTYPE_QLIST;
3804    }
3805
3806    QAPI_LIST_PREPEND(stats_list, stats);
3807    return stats_list;
3808}
3809
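/*
 * Convert one binary stats descriptor into a StatsSchemaValue entry and
 * prepend it to @list; unknown types, units or bases are skipped.
 */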
3810static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc,
3811                                                 StatsSchemaValueList *list,
3812                                                 Error **errp)
3813{
3814    StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1);
3815    schema_entry->value = g_new0(StatsSchemaValue, 1);
3816
3817    switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
3818    case KVM_STATS_TYPE_CUMULATIVE:
3819        schema_entry->value->type = STATS_TYPE_CUMULATIVE;
3820        break;
3821    case KVM_STATS_TYPE_INSTANT:
3822        schema_entry->value->type = STATS_TYPE_INSTANT;
3823        break;
3824    case KVM_STATS_TYPE_PEAK:
3825        schema_entry->value->type = STATS_TYPE_PEAK;
3826        break;
3827    case KVM_STATS_TYPE_LINEAR_HIST:
3828        schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM;
3829        schema_entry->value->bucket_size = pdesc->bucket_size;
3830        schema_entry->value->has_bucket_size = true;
3831        break;
3832    case KVM_STATS_TYPE_LOG_HIST:
3833        schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM;
3834        break;
3835    default:
3836        goto exit;
3837    }
3838
3839    switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
3840    case KVM_STATS_UNIT_NONE:
3841        break;
3842    case KVM_STATS_UNIT_BOOLEAN:
3843        schema_entry->value->has_unit = true;
3844        schema_entry->value->unit = STATS_UNIT_BOOLEAN;
3845        break;
3846    case KVM_STATS_UNIT_BYTES:
3847        schema_entry->value->has_unit = true;
3848        schema_entry->value->unit = STATS_UNIT_BYTES;
3849        break;
3850    case KVM_STATS_UNIT_CYCLES:
3851        schema_entry->value->has_unit = true;
3852        schema_entry->value->unit = STATS_UNIT_CYCLES;
3853        break;
3854    case KVM_STATS_UNIT_SECONDS:
3855        schema_entry->value->has_unit = true;
3856        schema_entry->value->unit = STATS_UNIT_SECONDS;
3857        break;
3858    default:
3859        goto exit;
3860    }
3861
3862    schema_entry->value->exponent = pdesc->exponent;
3863    if (pdesc->exponent) {
3864        switch (pdesc->flags & KVM_STATS_BASE_MASK) {
3865        case KVM_STATS_BASE_POW10:
3866            schema_entry->value->has_base = true;
3867            schema_entry->value->base = 10;
3868            break;
3869        case KVM_STATS_BASE_POW2:
3870            schema_entry->value->has_base = true;
3871            schema_entry->value->base = 2;
3872            break;
3873        default:
3874            goto exit;
3875        }
3876    }
3877
3878    schema_entry->value->name = g_strdup(pdesc->name);
3879    schema_entry->next = list;
3880    return schema_entry;
3881exit:
3882    g_free(schema_entry->value);
3883    g_free(schema_entry);
3884    return list;
3885}
3886
3887/* Cached stats descriptors */
3888typedef struct StatsDescriptors {
3889    const char *ident; /* cache key, currently the StatsTarget */
3890    struct kvm_stats_desc *kvm_stats_desc;
3891    struct kvm_stats_header *kvm_stats_header;
3892    QTAILQ_ENTRY(StatsDescriptors) next;
3893} StatsDescriptors;
3894
3895static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors =
3896    QTAILQ_HEAD_INITIALIZER(stats_descriptors);
3897
3898/*
3899 * Return the descriptors for 'target': either the cached ones that have
3900 * already been read, or ones freshly retrieved from 'stats_fd'.
3901 */
3902static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd,
3903                                                Error **errp)
3904{
3905    StatsDescriptors *descriptors;
3906    const char *ident;
3907    struct kvm_stats_desc *kvm_stats_desc;
3908    struct kvm_stats_header *kvm_stats_header;
3909    size_t size_desc;
3910    ssize_t ret;
3911
3912    ident = StatsTarget_str(target);
3913    QTAILQ_FOREACH(descriptors, &stats_descriptors, next) {
3914        if (g_str_equal(descriptors->ident, ident)) {
3915            return descriptors;
3916        }
3917    }
3918
3919    descriptors = g_new0(StatsDescriptors, 1);
3920
3921    /* Read stats header */
3922    kvm_stats_header = g_malloc(sizeof(*kvm_stats_header));
3923    ret = read(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header));
3924    if (ret != sizeof(*kvm_stats_header)) {
3925        error_setg(errp, "KVM stats: failed to read stats header: "
3926                   "expected %zu actual %zd",
3927                   sizeof(*kvm_stats_header), ret);
        g_free(kvm_stats_header);
3928        g_free(descriptors);
3929        return NULL;
3930    }
3931    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
3932
3933    /* Read stats descriptors */
3934    kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc);
3935    ret = pread(stats_fd, kvm_stats_desc,
3936                size_desc * kvm_stats_header->num_desc,
3937                kvm_stats_header->desc_offset);
3938
3939    if (ret != size_desc * kvm_stats_header->num_desc) {
3940        error_setg(errp, "KVM stats: failed to read stats descriptors: "
3941                   "expected %zu actual %zd",
3942                   size_desc * kvm_stats_header->num_desc, ret);
        g_free(kvm_stats_header);
3943        g_free(descriptors);
3944        g_free(kvm_stats_desc);
3945        return NULL;
3946    }
3947    descriptors->kvm_stats_header = kvm_stats_header;
3948    descriptors->kvm_stats_desc = kvm_stats_desc;
3949    descriptors->ident = ident;
3950    QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next);
3951    return descriptors;
3952}
3953
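/*
 * Read all statistics for @target from @stats_fd, filter them by @names
 * and append the result to @result as a STATS_PROVIDER_KVM entry.  For
 * the vCPU target the entry is tagged with the current vCPU's canonical
 * path.
 */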
3954static void query_stats(StatsResultList **result, StatsTarget target,
3955                        strList *names, int stats_fd, Error **errp)
3956{
3957    struct kvm_stats_desc *kvm_stats_desc;
3958    struct kvm_stats_header *kvm_stats_header;
3959    StatsDescriptors *descriptors;
3960    g_autofree uint64_t *stats_data = NULL;
3961    struct kvm_stats_desc *pdesc;
3962    StatsList *stats_list = NULL;
3963    size_t size_desc, size_data = 0;
3964    ssize_t ret;
3965    int i;
3966
3967    descriptors = find_stats_descriptors(target, stats_fd, errp);
3968    if (!descriptors) {
3969        return;
3970    }
3971
3972    kvm_stats_header = descriptors->kvm_stats_header;
3973    kvm_stats_desc = descriptors->kvm_stats_desc;
3974    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
3975
3976    /* Tally the total data size */
3977    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
3978        pdesc = (void *)kvm_stats_desc + i * size_desc;
3979        size_data += pdesc->size * sizeof(*stats_data);
3980    }
3981
3982    stats_data = g_malloc0(size_data);
3983    ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);
3984
3985    if (ret != size_data) {
3986        error_setg(errp, "KVM stats: failed to read data: "
3987                   "expected %zu actual %zu", size_data, ret);
3988        return;
3989    }
3990
3991    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
3992        uint64_t *stats;
3993        pdesc = (void *)kvm_stats_desc + i * size_desc;
3994
3995        /* Add entry to the list */
3996        stats = (void *)stats_data + pdesc->offset;
3997        if (!apply_str_list_filter(pdesc->name, names)) {
3998            continue;
3999        }
4000        stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
4001    }
4002
4003    if (!stats_list) {
4004        return;
4005    }
4006
4007    switch (target) {
4008    case STATS_TARGET_VM:
4009        add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
4010        break;
4011    case STATS_TARGET_VCPU:
4012        add_stats_entry(result, STATS_PROVIDER_KVM,
4013                        current_cpu->parent_obj.canonical_path,
4014                        stats_list);
4015        break;
4016    default:
4017        g_assert_not_reached();
4018    }
4019}
4020
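/*
 * Describe the statistics available for @target, based on the descriptors
 * read from @stats_fd, and append the schema to @result.
 */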
4021static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
4022                               int stats_fd, Error **errp)
4023{
4024    struct kvm_stats_desc *kvm_stats_desc;
4025    struct kvm_stats_header *kvm_stats_header;
4026    StatsDescriptors *descriptors;
4027    struct kvm_stats_desc *pdesc;
4028    StatsSchemaValueList *stats_list = NULL;
4029    size_t size_desc;
4030    int i;
4031
4032    descriptors = find_stats_descriptors(target, stats_fd, errp);
4033    if (!descriptors) {
4034        return;
4035    }
4036
4037    kvm_stats_header = descriptors->kvm_stats_header;
4038    kvm_stats_desc = descriptors->kvm_stats_desc;
4039    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;
4040
4041    /* Convert each descriptor into a schema entry */
4042    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
4043        pdesc = (void *)kvm_stats_desc + i * size_desc;
4044        stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
4045    }
4046
4047    add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
4048}
4049
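/*
 * run_on_cpu() callback: fetch this vCPU's stats fd and collect its
 * statistics.  query_stats_schema_vcpu() below does the same for the
 * schema.
 */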
4050static void query_stats_vcpu(CPUState *cpu, run_on_cpu_data data)
4051{
4052    StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
4053    int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
4054    Error *local_err = NULL;
4055
4056    if (stats_fd < 0) {
4057        error_setg_errno(&local_err, -stats_fd, "KVM stats: ioctl failed");
4058        error_propagate(kvm_stats_args->errp, local_err);
4059        return;
4060    }
4061    query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
4062                kvm_stats_args->names, stats_fd, kvm_stats_args->errp);
4063    close(stats_fd);
4064}
4065
4066static void query_stats_schema_vcpu(CPUState *cpu, run_on_cpu_data data)
4067{
4068    StatsArgs *kvm_stats_args = (StatsArgs *) data.host_ptr;
4069    int stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL);
4070    Error *local_err = NULL;
4071
4072    if (stats_fd < 0) {
4073        error_setg_errno(&local_err, -stats_fd, "KVM stats: ioctl failed");
4074        error_propagate(kvm_stats_args->errp, local_err);
4075        return;
4076    }
4077    query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd,
4078                       kvm_stats_args->errp);
4079    close(stats_fd);
4080}
4081
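/*
 * Collect KVM statistics for the requested target: VM-level stats come
 * from the VM stats fd, while vCPU stats are gathered on each vCPU
 * matching @targets via run_on_cpu().
 */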
4082static void query_stats_cb(StatsResultList **result, StatsTarget target,
4083                           strList *names, strList *targets, Error **errp)
4084{
4085    KVMState *s = kvm_state;
4086    CPUState *cpu;
4087    int stats_fd;
4088
4089    switch (target) {
4090    case STATS_TARGET_VM:
4091    {
4092        stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4093        if (stats_fd < 0) {
4094            error_setg_errno(errp, -stats_fd, "KVM stats: ioctl failed");
4095            return;
4096        }
4097        query_stats(result, target, names, stats_fd, errp);
4098        close(stats_fd);
4099        break;
4100    }
4101    case STATS_TARGET_VCPU:
4102    {
4103        StatsArgs stats_args;
4104        stats_args.result.stats = result;
4105        stats_args.names = names;
4106        stats_args.errp = errp;
4107        CPU_FOREACH(cpu) {
4108            if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
4109                continue;
4110            }
4111            run_on_cpu(cpu, query_stats_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
4112        }
4113        break;
4114    }
4115    default:
4116        break;
4117    }
4118}
4119
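/*
 * Collect the KVM stats schema for the VM and, if any vCPU exists, for
 * the vCPU target as well.
 */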
4120void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
4121{
4122    StatsArgs stats_args;
4123    KVMState *s = kvm_state;
4124    int stats_fd;
4125
4126    stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
4127    if (stats_fd < 0) {
4128        error_setg_errno(errp, -stats_fd, "KVM stats: ioctl failed");
4129        return;
4130    }
4131    query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
4132    close(stats_fd);
4133
4134    if (first_cpu) {
4135        stats_args.result.schema = result;
4136        stats_args.errp = errp;
4137        run_on_cpu(first_cpu, query_stats_schema_vcpu, RUN_ON_CPU_HOST_PTR(&stats_args));
4138    }
4139}
4140