qemu/kvm-all.c
   1/*
   2 * QEMU KVM support
   3 *
   4 * Copyright IBM, Corp. 2008
   5 *           Red Hat, Inc. 2008
   6 *
   7 * Authors:
   8 *  Anthony Liguori   <aliguori@us.ibm.com>
   9 *  Glauber Costa     <gcosta@redhat.com>
  10 *
  11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  12 * See the COPYING file in the top-level directory.
  13 *
  14 */
  15
  16#include "qemu/osdep.h"
  17#include <sys/ioctl.h>
  18
  19#include <linux/kvm.h>
  20
  21#include "qemu-common.h"
  22#include "qemu/atomic.h"
  23#include "qemu/option.h"
  24#include "qemu/config-file.h"
  25#include "qemu/error-report.h"
  26#include "hw/hw.h"
  27#include "hw/pci/msi.h"
  28#include "hw/pci/msix.h"
  29#include "hw/s390x/adapter.h"
  30#include "exec/gdbstub.h"
  31#include "sysemu/kvm_int.h"
  32#include "qemu/bswap.h"
  33#include "exec/memory.h"
  34#include "exec/ram_addr.h"
  35#include "exec/address-spaces.h"
  36#include "qemu/event_notifier.h"
  37#include "trace.h"
  38#include "hw/irq.h"
  39
  40#include "hw/boards.h"
  41
  42/* This check must be after config-host.h is included */
  43#ifdef CONFIG_EVENTFD
  44#include <sys/eventfd.h>
  45#endif
  46
  47/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
  48 * need to use the real host PAGE_SIZE, as that's what KVM will use.
  49 */
  50#define PAGE_SIZE getpagesize()
  51
  52//#define DEBUG_KVM
  53
  54#ifdef DEBUG_KVM
  55#define DPRINTF(fmt, ...) \
  56    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
  57#else
  58#define DPRINTF(fmt, ...) \
  59    do { } while (0)
  60#endif
  61
  62#define KVM_MSI_HASHTAB_SIZE    256
  63
  64struct KVMParkedVcpu {
  65    unsigned long vcpu_id;
  66    int kvm_fd;
  67    QLIST_ENTRY(KVMParkedVcpu) node;
  68};
  69
  70struct KVMState
  71{
  72    AccelState parent_obj;
  73
  74    int nr_slots;
  75    int fd;
  76    int vmfd;
  77    int coalesced_mmio;
  78    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
  79    bool coalesced_flush_in_progress;
  80    int broken_set_mem_region;
  81    int vcpu_events;
  82    int robust_singlestep;
  83    int debugregs;
  84#ifdef KVM_CAP_SET_GUEST_DEBUG
  85    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
  86#endif
  87    int many_ioeventfds;
  88    int intx_set_mask;
   89    /* The man page (and POSIX) say ioctl numbers are signed int, but
  90     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
  91     * unsigned, and treating them as signed here can break things */
  92    unsigned irq_set_ioctl;
  93    unsigned int sigmask_len;
  94    GHashTable *gsimap;
  95#ifdef KVM_CAP_IRQ_ROUTING
  96    struct kvm_irq_routing *irq_routes;
  97    int nr_allocated_irq_routes;
  98    unsigned long *used_gsi_bitmap;
  99    unsigned int gsi_count;
 100    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
 101#endif
 102    KVMMemoryListener memory_listener;
 103    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
 104};
 105
 106KVMState *kvm_state;
 107bool kvm_kernel_irqchip;
 108bool kvm_split_irqchip;
 109bool kvm_async_interrupts_allowed;
 110bool kvm_halt_in_kernel_allowed;
 111bool kvm_eventfds_allowed;
 112bool kvm_irqfds_allowed;
 113bool kvm_resamplefds_allowed;
 114bool kvm_msi_via_irqfd_allowed;
 115bool kvm_gsi_routing_allowed;
 116bool kvm_gsi_direct_mapping;
 117bool kvm_allowed;
 118bool kvm_readonly_mem_allowed;
 119bool kvm_vm_attributes_allowed;
 120bool kvm_direct_msi_allowed;
 121bool kvm_ioeventfd_any_length_allowed;
 122bool kvm_msi_use_devid;
 123
 124static const KVMCapabilityInfo kvm_required_capabilites[] = {
 125    KVM_CAP_INFO(USER_MEMORY),
 126    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
 127    KVM_CAP_LAST_INFO
 128};
 129
 130int kvm_get_max_memslots(void)
 131{
 132    KVMState *s = KVM_STATE(current_machine->accelerator);
 133
 134    return s->nr_slots;
 135}
 136
 137static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 138{
 139    KVMState *s = kvm_state;
 140    int i;
 141
 142    for (i = 0; i < s->nr_slots; i++) {
 143        if (kml->slots[i].memory_size == 0) {
 144            return &kml->slots[i];
 145        }
 146    }
 147
 148    return NULL;
 149}
 150
 151bool kvm_has_free_slot(MachineState *ms)
 152{
 153    KVMState *s = KVM_STATE(ms->accelerator);
 154
 155    return kvm_get_free_slot(&s->memory_listener);
 156}
 157
 158static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
 159{
 160    KVMSlot *slot = kvm_get_free_slot(kml);
 161
 162    if (slot) {
 163        return slot;
 164    }
 165
 166    fprintf(stderr, "%s: no free slot available\n", __func__);
 167    abort();
 168}
 169
 170static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
 171                                         hwaddr start_addr,
 172                                         hwaddr end_addr)
 173{
 174    KVMState *s = kvm_state;
 175    int i;
 176
 177    for (i = 0; i < s->nr_slots; i++) {
 178        KVMSlot *mem = &kml->slots[i];
 179
 180        if (start_addr == mem->start_addr &&
 181            end_addr == mem->start_addr + mem->memory_size) {
 182            return mem;
 183        }
 184    }
 185
 186    return NULL;
 187}
 188
 189/*
 190 * Find overlapping slot with lowest start address
 191 */
 192static KVMSlot *kvm_lookup_overlapping_slot(KVMMemoryListener *kml,
 193                                            hwaddr start_addr,
 194                                            hwaddr end_addr)
 195{
 196    KVMState *s = kvm_state;
 197    KVMSlot *found = NULL;
 198    int i;
 199
 200    for (i = 0; i < s->nr_slots; i++) {
 201        KVMSlot *mem = &kml->slots[i];
 202
 203        if (mem->memory_size == 0 ||
 204            (found && found->start_addr < mem->start_addr)) {
 205            continue;
 206        }
 207
 208        if (end_addr > mem->start_addr &&
 209            start_addr < mem->start_addr + mem->memory_size) {
 210            found = mem;
 211        }
 212    }
 213
 214    return found;
 215}
 216
 217int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
 218                                       hwaddr *phys_addr)
 219{
 220    KVMMemoryListener *kml = &s->memory_listener;
 221    int i;
 222
 223    for (i = 0; i < s->nr_slots; i++) {
 224        KVMSlot *mem = &kml->slots[i];
 225
 226        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
 227            *phys_addr = mem->start_addr + (ram - mem->ram);
 228            return 1;
 229        }
 230    }
 231
 232    return 0;
 233}
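
/*
 * Illustrative usage sketch (not part of the build): translating a host
 * pointer that is known to point into guest RAM back to its guest
 * physical address, e.g. for diagnostics.  "host_ptr" is a hypothetical
 * pointer supplied by the caller.
 *
 *     hwaddr gpa;
 *     if (kvm_physical_memory_addr_from_host(kvm_state, host_ptr, &gpa)) {
 *         fprintf(stderr, "host %p -> GPA 0x%" PRIx64 "\n",
 *                 host_ptr, (uint64_t)gpa);
 *     }
 */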
 234
 235static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot)
 236{
 237    KVMState *s = kvm_state;
 238    struct kvm_userspace_memory_region mem;
 239
 240    mem.slot = slot->slot | (kml->as_id << 16);
 241    mem.guest_phys_addr = slot->start_addr;
 242    mem.userspace_addr = (unsigned long)slot->ram;
 243    mem.flags = slot->flags;
 244
 245    if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
 246        /* Set the slot size to 0 before setting the slot to the desired
 247         * value. This is needed based on KVM commit 75d61fbc. */
 248        mem.memory_size = 0;
 249        kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 250    }
 251    mem.memory_size = slot->memory_size;
 252    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
 253}
 254
 255int kvm_destroy_vcpu(CPUState *cpu)
 256{
 257    KVMState *s = kvm_state;
 258    long mmap_size;
 259    struct KVMParkedVcpu *vcpu = NULL;
 260    int ret = 0;
 261
 262    DPRINTF("kvm_destroy_vcpu\n");
 263
 264    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 265    if (mmap_size < 0) {
 266        ret = mmap_size;
 267        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
 268        goto err;
 269    }
 270
 271    ret = munmap(cpu->kvm_run, mmap_size);
 272    if (ret < 0) {
 273        goto err;
 274    }
 275
 276    vcpu = g_malloc0(sizeof(*vcpu));
 277    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
 278    vcpu->kvm_fd = cpu->kvm_fd;
 279    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
 280err:
 281    return ret;
 282}
 283
 284static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
 285{
 286    struct KVMParkedVcpu *cpu;
 287
 288    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
 289        if (cpu->vcpu_id == vcpu_id) {
 290            int kvm_fd;
 291
 292            QLIST_REMOVE(cpu, node);
 293            kvm_fd = cpu->kvm_fd;
 294            g_free(cpu);
 295            return kvm_fd;
 296        }
 297    }
 298
 299    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
 300}
 301
 302int kvm_init_vcpu(CPUState *cpu)
 303{
 304    KVMState *s = kvm_state;
 305    long mmap_size;
 306    int ret;
 307
 308    DPRINTF("kvm_init_vcpu\n");
 309
 310    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
 311    if (ret < 0) {
 312        DPRINTF("kvm_create_vcpu failed\n");
 313        goto err;
 314    }
 315
 316    cpu->kvm_fd = ret;
 317    cpu->kvm_state = s;
 318    cpu->kvm_vcpu_dirty = true;
 319
 320    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
 321    if (mmap_size < 0) {
 322        ret = mmap_size;
 323        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
 324        goto err;
 325    }
 326
 327    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 328                        cpu->kvm_fd, 0);
 329    if (cpu->kvm_run == MAP_FAILED) {
 330        ret = -errno;
 331        DPRINTF("mmap'ing vcpu state failed\n");
 332        goto err;
 333    }
 334
 335    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
 336        s->coalesced_mmio_ring =
 337            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
 338    }
 339
 340    ret = kvm_arch_init_vcpu(cpu);
 341err:
 342    return ret;
 343}
 344
 345/*
 346 * dirty pages logging control
 347 */
 348
 349static int kvm_mem_flags(MemoryRegion *mr)
 350{
 351    bool readonly = mr->readonly || memory_region_is_romd(mr);
 352    int flags = 0;
 353
 354    if (memory_region_get_dirty_log_mask(mr) != 0) {
 355        flags |= KVM_MEM_LOG_DIRTY_PAGES;
 356    }
 357    if (readonly && kvm_readonly_mem_allowed) {
 358        flags |= KVM_MEM_READONLY;
 359    }
 360    return flags;
 361}
 362
 363static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
 364                                 MemoryRegion *mr)
 365{
 366    int old_flags;
 367
 368    old_flags = mem->flags;
 369    mem->flags = kvm_mem_flags(mr);
 370
 371    /* If nothing changed effectively, no need to issue ioctl */
 372    if (mem->flags == old_flags) {
 373        return 0;
 374    }
 375
 376    return kvm_set_user_memory_region(kml, mem);
 377}
 378
 379static int kvm_section_update_flags(KVMMemoryListener *kml,
 380                                    MemoryRegionSection *section)
 381{
 382    hwaddr phys_addr = section->offset_within_address_space;
 383    ram_addr_t size = int128_get64(section->size);
 384    KVMSlot *mem = kvm_lookup_matching_slot(kml, phys_addr, phys_addr + size);
 385
 386    if (mem == NULL)  {
 387        return 0;
 388    } else {
 389        return kvm_slot_update_flags(kml, mem, section->mr);
 390    }
 391}
 392
 393static void kvm_log_start(MemoryListener *listener,
 394                          MemoryRegionSection *section,
 395                          int old, int new)
 396{
 397    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 398    int r;
 399
 400    if (old != 0) {
 401        return;
 402    }
 403
 404    r = kvm_section_update_flags(kml, section);
 405    if (r < 0) {
 406        abort();
 407    }
 408}
 409
 410static void kvm_log_stop(MemoryListener *listener,
 411                          MemoryRegionSection *section,
 412                          int old, int new)
 413{
 414    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 415    int r;
 416
 417    if (new != 0) {
 418        return;
 419    }
 420
 421    r = kvm_section_update_flags(kml, section);
 422    if (r < 0) {
 423        abort();
 424    }
 425}
 426
 427/* get kvm's dirty pages bitmap and update qemu's */
 428static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
 429                                         unsigned long *bitmap)
 430{
 431    ram_addr_t start = section->offset_within_region +
 432                       memory_region_get_ram_addr(section->mr);
 433    ram_addr_t pages = int128_get64(section->size) / getpagesize();
 434
 435    cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
 436    return 0;
 437}
 438
 439#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
 440
 441/**
 442 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
  443 * This function updates qemu's dirty bitmap using
  444 * cpu_physical_memory_set_dirty_lebitmap().  This means every page
  445 * the kernel reports as dirty is marked dirty in qemu's bitmap too.
  446 *
  447 * @kml: the KVM memory listener object
  448 * @section: the memory region section whose dirty log is synchronized
 449 */
 450static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
 451                                          MemoryRegionSection *section)
 452{
 453    KVMState *s = kvm_state;
 454    unsigned long size, allocated_size = 0;
 455    struct kvm_dirty_log d = {};
 456    KVMSlot *mem;
 457    int ret = 0;
 458    hwaddr start_addr = section->offset_within_address_space;
 459    hwaddr end_addr = start_addr + int128_get64(section->size);
 460
 461    d.dirty_bitmap = NULL;
 462    while (start_addr < end_addr) {
 463        mem = kvm_lookup_overlapping_slot(kml, start_addr, end_addr);
 464        if (mem == NULL) {
 465            break;
 466        }
 467
 468        /* XXX bad kernel interface alert
 469         * For dirty bitmap, kernel allocates array of size aligned to
  470         * bits-per-long.  But when the kernel is 64-bit and
  471         * userspace is 32-bit, userspace cannot align to the same
  472         * bits-per-long, since sizeof(long) differs between kernel
  473         * and user space.  Userspace would then provide a buffer that
  474         * may be 4 bytes smaller than what the kernel uses, resulting
  475         * in userspace memory corruption (which valgrind cannot detect
  476         * either, in most cases).
  477         * So for now, let's align to 64 instead of HOST_LONG_BITS here,
  478         * in the hope that sizeof(long) won't become >8 any time soon.
 479         */
 480        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
 481                     /*HOST_LONG_BITS*/ 64) / 8;
 482        if (!d.dirty_bitmap) {
 483            d.dirty_bitmap = g_malloc(size);
 484        } else if (size > allocated_size) {
 485            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
 486        }
 487        allocated_size = size;
 488        memset(d.dirty_bitmap, 0, allocated_size);
 489
 490        d.slot = mem->slot | (kml->as_id << 16);
 491        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
 492            DPRINTF("ioctl failed %d\n", errno);
 493            ret = -1;
 494            break;
 495        }
 496
 497        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
 498        start_addr = mem->start_addr + mem->memory_size;
 499    }
 500    g_free(d.dirty_bitmap);
 501
 502    return ret;
 503}
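
/*
 * Worked example for the bitmap sizing above (assuming TARGET_PAGE_BITS
 * is 12, i.e. 4 KiB target pages): a 1 GiB slot covers 262144 pages, so
 * ALIGN(262144, 64) / 8 == 32768 bytes of dirty bitmap are allocated and
 * handed to KVM_GET_DIRTY_LOG.
 */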
 504
 505static void kvm_coalesce_mmio_region(MemoryListener *listener,
  506                                     MemoryRegionSection *section,
 507                                     hwaddr start, hwaddr size)
 508{
 509    KVMState *s = kvm_state;
 510
 511    if (s->coalesced_mmio) {
 512        struct kvm_coalesced_mmio_zone zone;
 513
 514        zone.addr = start;
 515        zone.size = size;
 516        zone.pad = 0;
 517
 518        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
 519    }
 520}
 521
 522static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
  523                                       MemoryRegionSection *section,
 524                                       hwaddr start, hwaddr size)
 525{
 526    KVMState *s = kvm_state;
 527
 528    if (s->coalesced_mmio) {
 529        struct kvm_coalesced_mmio_zone zone;
 530
 531        zone.addr = start;
 532        zone.size = size;
 533        zone.pad = 0;
 534
 535        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 536    }
 537}
 538
 539int kvm_check_extension(KVMState *s, unsigned int extension)
 540{
 541    int ret;
 542
 543    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
 544    if (ret < 0) {
 545        ret = 0;
 546    }
 547
 548    return ret;
 549}
 550
 551int kvm_vm_check_extension(KVMState *s, unsigned int extension)
 552{
 553    int ret;
 554
 555    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
 556    if (ret < 0) {
 557        /* VM wide version not implemented, use global one instead */
 558        ret = kvm_check_extension(s, extension);
 559    }
 560
 561    return ret;
 562}
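
/*
 * Typical capability probe, as used throughout this file (sketch only):
 *
 *     if (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0) {
 *         ... the feature may be used ...
 *     }
 *
 * A return value of 0 means "not supported"; positive values may carry
 * additional information (such as a count or limit), depending on the
 * capability.
 */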
 563
 564static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
 565{
 566#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
 567    /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
 568     * endianness, but the memory core hands them in target endianness.
 569     * For example, PPC is always treated as big-endian even if running
 570     * on KVM and on PPC64LE.  Correct here.
 571     */
 572    switch (size) {
 573    case 2:
 574        val = bswap16(val);
 575        break;
 576    case 4:
 577        val = bswap32(val);
 578        break;
 579    }
 580#endif
 581    return val;
 582}
 583
 584static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
 585                                  bool assign, uint32_t size, bool datamatch)
 586{
 587    int ret;
 588    struct kvm_ioeventfd iofd = {
 589        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
 590        .addr = addr,
 591        .len = size,
 592        .flags = 0,
 593        .fd = fd,
 594    };
 595
 596    if (!kvm_enabled()) {
 597        return -ENOSYS;
 598    }
 599
 600    if (datamatch) {
 601        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
 602    }
 603    if (!assign) {
 604        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
 605    }
 606
 607    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
 608
 609    if (ret < 0) {
 610        return -errno;
 611    }
 612
 613    return 0;
 614}
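
/*
 * Sketch (illustrative only): registering a doorbell ioeventfd for an MMIO
 * register at "addr", backed by eventfd "fd" and matching on a 16-bit
 * queue index.  The names are hypothetical; the real wiring in this file
 * goes through the MemoryListener eventfd_add/eventfd_del callbacks below.
 *
 *     kvm_set_ioeventfd_mmio(fd, addr, queue_idx, true, 2, true);
 */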
 615
 616static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
 617                                 bool assign, uint32_t size, bool datamatch)
 618{
 619    struct kvm_ioeventfd kick = {
 620        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
 621        .addr = addr,
 622        .flags = KVM_IOEVENTFD_FLAG_PIO,
 623        .len = size,
 624        .fd = fd,
 625    };
 626    int r;
 627    if (!kvm_enabled()) {
 628        return -ENOSYS;
 629    }
 630    if (datamatch) {
 631        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
 632    }
 633    if (!assign) {
 634        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
 635    }
 636    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
 637    if (r < 0) {
 638        return r;
 639    }
 640    return 0;
 641}
 642
 643
 644static int kvm_check_many_ioeventfds(void)
 645{
 646    /* Userspace can use ioeventfd for io notification.  This requires a host
 647     * that supports eventfd(2) and an I/O thread; since eventfd does not
 648     * support SIGIO it cannot interrupt the vcpu.
 649     *
 650     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
 651     * can avoid creating too many ioeventfds.
 652     */
 653#if defined(CONFIG_EVENTFD)
 654    int ioeventfds[7];
 655    int i, ret = 0;
 656    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
 657        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
 658        if (ioeventfds[i] < 0) {
 659            break;
 660        }
 661        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
 662        if (ret < 0) {
 663            close(ioeventfds[i]);
 664            break;
 665        }
 666    }
 667
 668    /* Decide whether many devices are supported or not */
 669    ret = i == ARRAY_SIZE(ioeventfds);
 670
 671    while (i-- > 0) {
 672        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
 673        close(ioeventfds[i]);
 674    }
 675    return ret;
 676#else
 677    return 0;
 678#endif
 679}
 680
 681static const KVMCapabilityInfo *
 682kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
 683{
 684    while (list->name) {
 685        if (!kvm_check_extension(s, list->value)) {
 686            return list;
 687        }
 688        list++;
 689    }
 690    return NULL;
 691}
 692
 693static void kvm_set_phys_mem(KVMMemoryListener *kml,
 694                             MemoryRegionSection *section, bool add)
 695{
 696    KVMState *s = kvm_state;
 697    KVMSlot *mem, old;
 698    int err;
 699    MemoryRegion *mr = section->mr;
 700    bool writeable = !mr->readonly && !mr->rom_device;
 701    hwaddr start_addr = section->offset_within_address_space;
 702    ram_addr_t size = int128_get64(section->size);
 703    void *ram = NULL;
 704    unsigned delta;
 705
 706    /* kvm works in page size chunks, but the function may be called
  707       with a sub-page size and an unaligned start address. Round the start
  708       address up and truncate the size down to page boundaries. */
 709    delta = qemu_real_host_page_size - (start_addr & ~qemu_real_host_page_mask);
 710    delta &= ~qemu_real_host_page_mask;
 711    if (delta > size) {
 712        return;
 713    }
 714    start_addr += delta;
 715    size -= delta;
 716    size &= qemu_real_host_page_mask;
 717    if (!size || (start_addr & ~qemu_real_host_page_mask)) {
 718        return;
 719    }
 720
 721    if (!memory_region_is_ram(mr)) {
 722        if (writeable || !kvm_readonly_mem_allowed) {
 723            return;
 724        } else if (!mr->romd_mode) {
 725            /* If the memory device is not in romd_mode, then we actually want
 726             * to remove the kvm memory slot so all accesses will trap. */
 727            add = false;
 728        }
 729    }
 730
 731    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;
 732
 733    while (1) {
 734        mem = kvm_lookup_overlapping_slot(kml, start_addr, start_addr + size);
 735        if (!mem) {
 736            break;
 737        }
 738
 739        if (add && start_addr >= mem->start_addr &&
 740            (start_addr + size <= mem->start_addr + mem->memory_size) &&
 741            (ram - start_addr == mem->ram - mem->start_addr)) {
 742            /* The new slot fits into the existing one and comes with
 743             * identical parameters - update flags and done. */
 744            kvm_slot_update_flags(kml, mem, mr);
 745            return;
 746        }
 747
 748        old = *mem;
 749
 750        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
 751            kvm_physical_sync_dirty_bitmap(kml, section);
 752        }
 753
 754        /* unregister the overlapping slot */
 755        mem->memory_size = 0;
 756        err = kvm_set_user_memory_region(kml, mem);
 757        if (err) {
 758            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
 759                    __func__, strerror(-err));
 760            abort();
 761        }
 762
  763        /* Workaround for older KVM versions: we can't join slots, not even by
 764         * unregistering the previous ones and then registering the larger
 765         * slot. We have to maintain the existing fragmentation. Sigh.
 766         *
 767         * This workaround assumes that the new slot starts at the same
 768         * address as the first existing one. If not or if some overlapping
 769         * slot comes around later, we will fail (not seen in practice so far)
 770         * - and actually require a recent KVM version. */
 771        if (s->broken_set_mem_region &&
 772            old.start_addr == start_addr && old.memory_size < size && add) {
 773            mem = kvm_alloc_slot(kml);
 774            mem->memory_size = old.memory_size;
 775            mem->start_addr = old.start_addr;
 776            mem->ram = old.ram;
 777            mem->flags = kvm_mem_flags(mr);
 778
 779            err = kvm_set_user_memory_region(kml, mem);
 780            if (err) {
 781                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
 782                        strerror(-err));
 783                abort();
 784            }
 785
 786            start_addr += old.memory_size;
 787            ram += old.memory_size;
 788            size -= old.memory_size;
 789            continue;
 790        }
 791
 792        /* register prefix slot */
 793        if (old.start_addr < start_addr) {
 794            mem = kvm_alloc_slot(kml);
 795            mem->memory_size = start_addr - old.start_addr;
 796            mem->start_addr = old.start_addr;
 797            mem->ram = old.ram;
 798            mem->flags =  kvm_mem_flags(mr);
 799
 800            err = kvm_set_user_memory_region(kml, mem);
 801            if (err) {
 802                fprintf(stderr, "%s: error registering prefix slot: %s\n",
 803                        __func__, strerror(-err));
 804#ifdef TARGET_PPC
 805                fprintf(stderr, "%s: This is probably because your kernel's " \
 806                                "PAGE_SIZE is too big. Please try to use 4k " \
 807                                "PAGE_SIZE!\n", __func__);
 808#endif
 809                abort();
 810            }
 811        }
 812
 813        /* register suffix slot */
 814        if (old.start_addr + old.memory_size > start_addr + size) {
 815            ram_addr_t size_delta;
 816
 817            mem = kvm_alloc_slot(kml);
 818            mem->start_addr = start_addr + size;
 819            size_delta = mem->start_addr - old.start_addr;
 820            mem->memory_size = old.memory_size - size_delta;
 821            mem->ram = old.ram + size_delta;
 822            mem->flags = kvm_mem_flags(mr);
 823
 824            err = kvm_set_user_memory_region(kml, mem);
 825            if (err) {
 826                fprintf(stderr, "%s: error registering suffix slot: %s\n",
 827                        __func__, strerror(-err));
 828                abort();
 829            }
 830        }
 831    }
 832
 833    /* in case the KVM bug workaround already "consumed" the new slot */
 834    if (!size) {
 835        return;
 836    }
 837    if (!add) {
 838        return;
 839    }
 840    mem = kvm_alloc_slot(kml);
 841    mem->memory_size = size;
 842    mem->start_addr = start_addr;
 843    mem->ram = ram;
 844    mem->flags = kvm_mem_flags(mr);
 845
 846    err = kvm_set_user_memory_region(kml, mem);
 847    if (err) {
 848        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
 849                strerror(-err));
 850        abort();
 851    }
 852}
 853
 854static void kvm_region_add(MemoryListener *listener,
 855                           MemoryRegionSection *section)
 856{
 857    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 858
 859    memory_region_ref(section->mr);
 860    kvm_set_phys_mem(kml, section, true);
 861}
 862
 863static void kvm_region_del(MemoryListener *listener,
 864                           MemoryRegionSection *section)
 865{
 866    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 867
 868    kvm_set_phys_mem(kml, section, false);
 869    memory_region_unref(section->mr);
 870}
 871
 872static void kvm_log_sync(MemoryListener *listener,
 873                         MemoryRegionSection *section)
 874{
 875    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
 876    int r;
 877
 878    r = kvm_physical_sync_dirty_bitmap(kml, section);
 879    if (r < 0) {
 880        abort();
 881    }
 882}
 883
 884static void kvm_mem_ioeventfd_add(MemoryListener *listener,
 885                                  MemoryRegionSection *section,
 886                                  bool match_data, uint64_t data,
 887                                  EventNotifier *e)
 888{
 889    int fd = event_notifier_get_fd(e);
 890    int r;
 891
 892    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
 893                               data, true, int128_get64(section->size),
 894                               match_data);
 895    if (r < 0) {
 896        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
 897                __func__, strerror(-r));
 898        abort();
 899    }
 900}
 901
 902static void kvm_mem_ioeventfd_del(MemoryListener *listener,
 903                                  MemoryRegionSection *section,
 904                                  bool match_data, uint64_t data,
 905                                  EventNotifier *e)
 906{
 907    int fd = event_notifier_get_fd(e);
 908    int r;
 909
 910    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
 911                               data, false, int128_get64(section->size),
 912                               match_data);
 913    if (r < 0) {
 914        abort();
 915    }
 916}
 917
 918static void kvm_io_ioeventfd_add(MemoryListener *listener,
 919                                 MemoryRegionSection *section,
 920                                 bool match_data, uint64_t data,
 921                                 EventNotifier *e)
 922{
 923    int fd = event_notifier_get_fd(e);
 924    int r;
 925
 926    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
 927                              data, true, int128_get64(section->size),
 928                              match_data);
 929    if (r < 0) {
 930        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
 931                __func__, strerror(-r));
 932        abort();
 933    }
 934}
 935
 936static void kvm_io_ioeventfd_del(MemoryListener *listener,
 937                                 MemoryRegionSection *section,
 938                                 bool match_data, uint64_t data,
 939                                 EventNotifier *e)
 940
 941{
 942    int fd = event_notifier_get_fd(e);
 943    int r;
 944
 945    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
 946                              data, false, int128_get64(section->size),
 947                              match_data);
 948    if (r < 0) {
 949        abort();
 950    }
 951}
 952
 953void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
 954                                  AddressSpace *as, int as_id)
 955{
 956    int i;
 957
 958    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
 959    kml->as_id = as_id;
 960
 961    for (i = 0; i < s->nr_slots; i++) {
 962        kml->slots[i].slot = i;
 963    }
 964
 965    kml->listener.region_add = kvm_region_add;
 966    kml->listener.region_del = kvm_region_del;
 967    kml->listener.log_start = kvm_log_start;
 968    kml->listener.log_stop = kvm_log_stop;
 969    kml->listener.log_sync = kvm_log_sync;
 970    kml->listener.priority = 10;
 971
 972    memory_listener_register(&kml->listener, as);
 973}
 974
 975static MemoryListener kvm_io_listener = {
 976    .eventfd_add = kvm_io_ioeventfd_add,
 977    .eventfd_del = kvm_io_ioeventfd_del,
 978    .priority = 10,
 979};
 980
 981static void kvm_handle_interrupt(CPUState *cpu, int mask)
 982{
 983    cpu->interrupt_request |= mask;
 984
 985    if (!qemu_cpu_is_self(cpu)) {
 986        qemu_cpu_kick(cpu);
 987    }
 988}
 989
 990int kvm_set_irq(KVMState *s, int irq, int level)
 991{
 992    struct kvm_irq_level event;
 993    int ret;
 994
 995    assert(kvm_async_interrupts_enabled());
 996
 997    event.level = level;
 998    event.irq = irq;
 999    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
1000    if (ret < 0) {
1001        perror("kvm_set_irq");
1002        abort();
1003    }
1004
1005    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
1006}
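
/*
 * Usage sketch (illustrative): pulsing a GSI on the in-kernel irqchip.
 * "gsi" is a hypothetical GSI number owned by the caller.
 *
 *     kvm_set_irq(kvm_state, gsi, 1);
 *     kvm_set_irq(kvm_state, gsi, 0);
 *
 * With KVM_IRQ_LINE_STATUS the return value reports whether the interrupt
 * was delivered or coalesced; with plain KVM_IRQ_LINE it is always 1.
 */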
1007
1008#ifdef KVM_CAP_IRQ_ROUTING
1009typedef struct KVMMSIRoute {
1010    struct kvm_irq_routing_entry kroute;
1011    QTAILQ_ENTRY(KVMMSIRoute) entry;
1012} KVMMSIRoute;
1013
1014static void set_gsi(KVMState *s, unsigned int gsi)
1015{
1016    set_bit(gsi, s->used_gsi_bitmap);
1017}
1018
1019static void clear_gsi(KVMState *s, unsigned int gsi)
1020{
1021    clear_bit(gsi, s->used_gsi_bitmap);
1022}
1023
1024void kvm_init_irq_routing(KVMState *s)
1025{
1026    int gsi_count, i;
1027
1028    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
1029    if (gsi_count > 0) {
 1030        /* Track which GSIs are in use with a bitmap */
1031        s->used_gsi_bitmap = bitmap_new(gsi_count);
1032        s->gsi_count = gsi_count;
1033    }
1034
1035    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
1036    s->nr_allocated_irq_routes = 0;
1037
1038    if (!kvm_direct_msi_allowed) {
1039        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
1040            QTAILQ_INIT(&s->msi_hashtab[i]);
1041        }
1042    }
1043
1044    kvm_arch_init_irq_routing(s);
1045}
1046
1047void kvm_irqchip_commit_routes(KVMState *s)
1048{
1049    int ret;
1050
1051    if (kvm_gsi_direct_mapping()) {
1052        return;
1053    }
1054
1055    if (!kvm_gsi_routing_enabled()) {
1056        return;
1057    }
1058
1059    s->irq_routes->flags = 0;
1060    trace_kvm_irqchip_commit_routes();
1061    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
1062    assert(ret == 0);
1063}
1064
1065static void kvm_add_routing_entry(KVMState *s,
1066                                  struct kvm_irq_routing_entry *entry)
1067{
1068    struct kvm_irq_routing_entry *new;
1069    int n, size;
1070
1071    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1072        n = s->nr_allocated_irq_routes * 2;
1073        if (n < 64) {
1074            n = 64;
1075        }
1076        size = sizeof(struct kvm_irq_routing);
1077        size += n * sizeof(*new);
1078        s->irq_routes = g_realloc(s->irq_routes, size);
1079        s->nr_allocated_irq_routes = n;
1080    }
1081    n = s->irq_routes->nr++;
1082    new = &s->irq_routes->entries[n];
1083
1084    *new = *entry;
1085
1086    set_gsi(s, entry->gsi);
1087}
1088
1089static int kvm_update_routing_entry(KVMState *s,
1090                                    struct kvm_irq_routing_entry *new_entry)
1091{
1092    struct kvm_irq_routing_entry *entry;
1093    int n;
1094
1095    for (n = 0; n < s->irq_routes->nr; n++) {
1096        entry = &s->irq_routes->entries[n];
1097        if (entry->gsi != new_entry->gsi) {
1098            continue;
1099        }
1100
 1101        if (!memcmp(entry, new_entry, sizeof *entry)) {
1102            return 0;
1103        }
1104
1105        *entry = *new_entry;
1106
1107        return 0;
1108    }
1109
1110    return -ESRCH;
1111}
1112
1113void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1114{
1115    struct kvm_irq_routing_entry e = {};
1116
1117    assert(pin < s->gsi_count);
1118
1119    e.gsi = irq;
1120    e.type = KVM_IRQ_ROUTING_IRQCHIP;
1121    e.flags = 0;
1122    e.u.irqchip.irqchip = irqchip;
1123    e.u.irqchip.pin = pin;
1124    kvm_add_routing_entry(s, &e);
1125}
1126
1127void kvm_irqchip_release_virq(KVMState *s, int virq)
1128{
1129    struct kvm_irq_routing_entry *e;
1130    int i;
1131
1132    if (kvm_gsi_direct_mapping()) {
1133        return;
1134    }
1135
1136    for (i = 0; i < s->irq_routes->nr; i++) {
1137        e = &s->irq_routes->entries[i];
1138        if (e->gsi == virq) {
1139            s->irq_routes->nr--;
1140            *e = s->irq_routes->entries[s->irq_routes->nr];
1141        }
1142    }
1143    clear_gsi(s, virq);
1144    kvm_arch_release_virq_post(virq);
1145}
1146
1147static unsigned int kvm_hash_msi(uint32_t data)
1148{
1149    /* This is optimized for IA32 MSI layout. However, no other arch shall
1150     * repeat the mistake of not providing a direct MSI injection API. */
1151    return data & 0xff;
1152}
1153
1154static void kvm_flush_dynamic_msi_routes(KVMState *s)
1155{
1156    KVMMSIRoute *route, *next;
1157    unsigned int hash;
1158
1159    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1160        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1161            kvm_irqchip_release_virq(s, route->kroute.gsi);
1162            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1163            g_free(route);
1164        }
1165    }
1166}
1167
1168static int kvm_irqchip_get_virq(KVMState *s)
1169{
1170    int next_virq;
1171
1172    /*
 1173     * PIC and IOAPIC share the first 16 GSI numbers, so there are more
 1174     * available GSI numbers than IRQ routing entries. Allocating a GSI
1175     * number can succeed even though a new route entry cannot be added.
1176     * When this happens, flush dynamic MSI entries to free IRQ route entries.
1177     */
1178    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
1179        kvm_flush_dynamic_msi_routes(s);
1180    }
1181
1182    /* Return the lowest unused GSI in the bitmap */
1183    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
1184    if (next_virq >= s->gsi_count) {
1185        return -ENOSPC;
1186    } else {
1187        return next_virq;
1188    }
1189}
1190
1191static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1192{
1193    unsigned int hash = kvm_hash_msi(msg.data);
1194    KVMMSIRoute *route;
1195
1196    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1197        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1198            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1199            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1200            return route;
1201        }
1202    }
1203    return NULL;
1204}
1205
1206int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1207{
1208    struct kvm_msi msi;
1209    KVMMSIRoute *route;
1210
1211    if (kvm_direct_msi_allowed) {
1212        msi.address_lo = (uint32_t)msg.address;
1213        msi.address_hi = msg.address >> 32;
1214        msi.data = le32_to_cpu(msg.data);
1215        msi.flags = 0;
1216        memset(msi.pad, 0, sizeof(msi.pad));
1217
1218        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
1219    }
1220
1221    route = kvm_lookup_msi_route(s, msg);
1222    if (!route) {
1223        int virq;
1224
1225        virq = kvm_irqchip_get_virq(s);
1226        if (virq < 0) {
1227            return virq;
1228        }
1229
1230        route = g_malloc0(sizeof(KVMMSIRoute));
1231        route->kroute.gsi = virq;
1232        route->kroute.type = KVM_IRQ_ROUTING_MSI;
1233        route->kroute.flags = 0;
1234        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1235        route->kroute.u.msi.address_hi = msg.address >> 32;
1236        route->kroute.u.msi.data = le32_to_cpu(msg.data);
1237
1238        kvm_add_routing_entry(s, &route->kroute);
1239        kvm_irqchip_commit_routes(s);
1240
1241        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1242                           entry);
1243    }
1244
1245    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1246
1247    return kvm_set_irq(s, route->kroute.gsi, 1);
1248}
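
/*
 * Sketch of a caller injecting an MSI without a pre-allocated route (for
 * example an emulated device model).  The address/data values below are
 * hypothetical and would normally come from the device's MSI registers.
 *
 *     MSIMessage msg = { .address = 0xfee00000ULL, .data = 0x4041 };
 *     if (kvm_irqchip_send_msi(kvm_state, msg) < 0) {
 *         ... fall back to fully emulated delivery ...
 *     }
 */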
1249
1250int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1251{
1252    struct kvm_irq_routing_entry kroute = {};
1253    int virq;
1254    MSIMessage msg = {0, 0};
1255
1256    if (dev) {
1257        msg = pci_get_msi_message(dev, vector);
1258    }
1259
1260    if (kvm_gsi_direct_mapping()) {
1261        return kvm_arch_msi_data_to_gsi(msg.data);
1262    }
1263
1264    if (!kvm_gsi_routing_enabled()) {
1265        return -ENOSYS;
1266    }
1267
1268    virq = kvm_irqchip_get_virq(s);
1269    if (virq < 0) {
1270        return virq;
1271    }
1272
1273    kroute.gsi = virq;
1274    kroute.type = KVM_IRQ_ROUTING_MSI;
1275    kroute.flags = 0;
1276    kroute.u.msi.address_lo = (uint32_t)msg.address;
1277    kroute.u.msi.address_hi = msg.address >> 32;
1278    kroute.u.msi.data = le32_to_cpu(msg.data);
1279    if (kvm_msi_devid_required()) {
1280        kroute.flags = KVM_MSI_VALID_DEVID;
1281        kroute.u.msi.devid = pci_requester_id(dev);
1282    }
1283    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1284        kvm_irqchip_release_virq(s, virq);
1285        return -EINVAL;
1286    }
1287
1288    trace_kvm_irqchip_add_msi_route(virq);
1289
1290    kvm_add_routing_entry(s, &kroute);
1291    kvm_arch_add_msi_route_post(&kroute, vector, dev);
1292    kvm_irqchip_commit_routes(s);
1293
1294    return virq;
1295}
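
/*
 * Illustrative flow (names are hypothetical): a device model that wants
 * the kernel to deliver one of its MSI vectors directly would typically
 * allocate a route and then attach an irqfd to it:
 *
 *     virq = kvm_irqchip_add_msi_route(kvm_state, vector, pci_dev);
 *     if (virq >= 0) {
 *         kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &notifier, NULL, virq);
 *     }
 *
 * Signalling the notifier's eventfd then triggers the MSI without exiting
 * to userspace.
 */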
1296
1297int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
1298                                 PCIDevice *dev)
1299{
1300    struct kvm_irq_routing_entry kroute = {};
1301
1302    if (kvm_gsi_direct_mapping()) {
1303        return 0;
1304    }
1305
1306    if (!kvm_irqchip_in_kernel()) {
1307        return -ENOSYS;
1308    }
1309
1310    kroute.gsi = virq;
1311    kroute.type = KVM_IRQ_ROUTING_MSI;
1312    kroute.flags = 0;
1313    kroute.u.msi.address_lo = (uint32_t)msg.address;
1314    kroute.u.msi.address_hi = msg.address >> 32;
1315    kroute.u.msi.data = le32_to_cpu(msg.data);
1316    if (kvm_msi_devid_required()) {
1317        kroute.flags = KVM_MSI_VALID_DEVID;
1318        kroute.u.msi.devid = pci_requester_id(dev);
1319    }
1320    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
1321        return -EINVAL;
1322    }
1323
1324    trace_kvm_irqchip_update_msi_route(virq);
1325
1326    return kvm_update_routing_entry(s, &kroute);
1327}
1328
1329static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
1330                                    bool assign)
1331{
1332    struct kvm_irqfd irqfd = {
1333        .fd = fd,
1334        .gsi = virq,
1335        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
1336    };
1337
1338    if (rfd != -1) {
1339        irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
1340        irqfd.resamplefd = rfd;
1341    }
1342
1343    if (!kvm_irqfds_enabled()) {
1344        return -ENOSYS;
1345    }
1346
1347    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
1348}
1349
1350int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1351{
1352    struct kvm_irq_routing_entry kroute = {};
1353    int virq;
1354
1355    if (!kvm_gsi_routing_enabled()) {
1356        return -ENOSYS;
1357    }
1358
1359    virq = kvm_irqchip_get_virq(s);
1360    if (virq < 0) {
1361        return virq;
1362    }
1363
1364    kroute.gsi = virq;
1365    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
1366    kroute.flags = 0;
1367    kroute.u.adapter.summary_addr = adapter->summary_addr;
1368    kroute.u.adapter.ind_addr = adapter->ind_addr;
1369    kroute.u.adapter.summary_offset = adapter->summary_offset;
1370    kroute.u.adapter.ind_offset = adapter->ind_offset;
1371    kroute.u.adapter.adapter_id = adapter->adapter_id;
1372
1373    kvm_add_routing_entry(s, &kroute);
1374
1375    return virq;
1376}
1377
1378int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1379{
1380    struct kvm_irq_routing_entry kroute = {};
1381    int virq;
1382
1383    if (!kvm_gsi_routing_enabled()) {
1384        return -ENOSYS;
1385    }
1386    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
1387        return -ENOSYS;
1388    }
1389    virq = kvm_irqchip_get_virq(s);
1390    if (virq < 0) {
1391        return virq;
1392    }
1393
1394    kroute.gsi = virq;
1395    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
1396    kroute.flags = 0;
1397    kroute.u.hv_sint.vcpu = vcpu;
1398    kroute.u.hv_sint.sint = sint;
1399
1400    kvm_add_routing_entry(s, &kroute);
1401    kvm_irqchip_commit_routes(s);
1402
1403    return virq;
1404}
1405
1406#else /* !KVM_CAP_IRQ_ROUTING */
1407
1408void kvm_init_irq_routing(KVMState *s)
1409{
1410}
1411
1412void kvm_irqchip_release_virq(KVMState *s, int virq)
1413{
1414}
1415
1416int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1417{
1418    abort();
1419}
1420
1421int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
1422{
1423    return -ENOSYS;
1424}
1425
1426int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1427{
1428    return -ENOSYS;
1429}
1430
1431int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
1432{
1433    return -ENOSYS;
1434}
1435
 1436static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
                                         bool assign)
1437{
1438    abort();
1439}
1440
 1441int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                      PCIDevice *dev)
1442{
1443    return -ENOSYS;
1444}
1445#endif /* !KVM_CAP_IRQ_ROUTING */
1446
1447int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1448                                       EventNotifier *rn, int virq)
1449{
1450    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
1451           rn ? event_notifier_get_fd(rn) : -1, virq, true);
1452}
1453
1454int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
1455                                          int virq)
1456{
1457    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
1458           false);
1459}
1460
1461int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
1462                                   EventNotifier *rn, qemu_irq irq)
1463{
1464    gpointer key, gsi;
1465    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1466
1467    if (!found) {
1468        return -ENXIO;
1469    }
1470    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
1471}
1472
1473int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
1474                                      qemu_irq irq)
1475{
1476    gpointer key, gsi;
1477    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
1478
1479    if (!found) {
1480        return -ENXIO;
1481    }
1482    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
1483}
1484
1485void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
1486{
1487    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
1488}
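
/*
 * Sketch (hypothetical names): an interrupt controller model registers the
 * qemu_irq it hands out against its GSI so that irqfd users can resolve it
 * later without knowing the GSI themselves:
 *
 *     kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, gsi);
 *     ...
 *     kvm_irqchip_add_irqfd_notifier(kvm_state, &notifier, NULL, irq);
 */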
1489
1490static void kvm_irqchip_create(MachineState *machine, KVMState *s)
1491{
1492    int ret;
1493
1494    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
1495        ;
1496    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
1497        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
1498        if (ret < 0) {
1499            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
1500            exit(1);
1501        }
1502    } else {
1503        return;
1504    }
1505
 1506    /* First probe and see if there's an arch-specific hook to create the
1507     * in-kernel irqchip for us */
1508    ret = kvm_arch_irqchip_create(machine, s);
1509    if (ret == 0) {
1510        if (machine_kernel_irqchip_split(machine)) {
1511            perror("Split IRQ chip mode not supported.");
1512            exit(1);
1513        } else {
1514            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
1515        }
1516    }
1517    if (ret < 0) {
1518        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
1519        exit(1);
1520    }
1521
1522    kvm_kernel_irqchip = true;
1523    /* If we have an in-kernel IRQ chip then we must have asynchronous
1524     * interrupt delivery (though the reverse is not necessarily true)
1525     */
1526    kvm_async_interrupts_allowed = true;
1527    kvm_halt_in_kernel_allowed = true;
1528
1529    kvm_init_irq_routing(s);
1530
1531    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
1532}
1533
1534/* Find number of supported CPUs using the recommended
1535 * procedure from the kernel API documentation to cope with
1536 * older kernels that may be missing capabilities.
1537 */
1538static int kvm_recommended_vcpus(KVMState *s)
1539{
1540    int ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
1541    return (ret) ? ret : 4;
1542}
1543
1544static int kvm_max_vcpus(KVMState *s)
1545{
1546    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
1547    return (ret) ? ret : kvm_recommended_vcpus(s);
1548}
1549
1550static int kvm_max_vcpu_id(KVMState *s)
1551{
1552    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
1553    return (ret) ? ret : kvm_max_vcpus(s);
1554}
1555
1556bool kvm_vcpu_id_is_valid(int vcpu_id)
1557{
1558    KVMState *s = KVM_STATE(current_machine->accelerator);
1559    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
1560}
1561
1562static int kvm_init(MachineState *ms)
1563{
1564    MachineClass *mc = MACHINE_GET_CLASS(ms);
1565    static const char upgrade_note[] =
1566        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
1567        "(see http://sourceforge.net/projects/kvm).\n";
1568    struct {
1569        const char *name;
1570        int num;
1571    } num_cpus[] = {
1572        { "SMP",          smp_cpus },
1573        { "hotpluggable", max_cpus },
1574        { NULL, }
1575    }, *nc = num_cpus;
1576    int soft_vcpus_limit, hard_vcpus_limit;
1577    KVMState *s;
1578    const KVMCapabilityInfo *missing_cap;
1579    int ret;
1580    int type = 0;
1581    const char *kvm_type;
1582
1583    s = KVM_STATE(ms->accelerator);
1584
1585    /*
1586     * On systems where the kernel can support different base page
1587     * sizes, host page size may be different from TARGET_PAGE_SIZE,
1588     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
1589     * page size for the system though.
1590     */
1591    assert(TARGET_PAGE_SIZE <= getpagesize());
1592
1593    s->sigmask_len = 8;
1594
1595#ifdef KVM_CAP_SET_GUEST_DEBUG
1596    QTAILQ_INIT(&s->kvm_sw_breakpoints);
1597#endif
1598    QLIST_INIT(&s->kvm_parked_vcpus);
1599    s->vmfd = -1;
1600    s->fd = qemu_open("/dev/kvm", O_RDWR);
1601    if (s->fd == -1) {
1602        fprintf(stderr, "Could not access KVM kernel module: %m\n");
1603        ret = -errno;
1604        goto err;
1605    }
1606
1607    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
1608    if (ret < KVM_API_VERSION) {
1609        if (ret >= 0) {
1610            ret = -EINVAL;
1611        }
1612        fprintf(stderr, "kvm version too old\n");
1613        goto err;
1614    }
1615
1616    if (ret > KVM_API_VERSION) {
1617        ret = -EINVAL;
1618        fprintf(stderr, "kvm version not supported\n");
1619        goto err;
1620    }
1621
1622    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
1623
1624    /* If unspecified, use the default value */
1625    if (!s->nr_slots) {
1626        s->nr_slots = 32;
1627    }
1628
1629    /* check the vcpu limits */
1630    soft_vcpus_limit = kvm_recommended_vcpus(s);
1631    hard_vcpus_limit = kvm_max_vcpus(s);
1632
1633    while (nc->name) {
1634        if (nc->num > soft_vcpus_limit) {
1635            fprintf(stderr,
1636                    "Warning: Number of %s cpus requested (%d) exceeds "
1637                    "the recommended cpus supported by KVM (%d)\n",
1638                    nc->name, nc->num, soft_vcpus_limit);
1639
1640            if (nc->num > hard_vcpus_limit) {
1641                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
1642                        "the maximum cpus supported by KVM (%d)\n",
1643                        nc->name, nc->num, hard_vcpus_limit);
1644                exit(1);
1645            }
1646        }
1647        nc++;
1648    }
1649
1650    kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
1651    if (mc->kvm_type) {
1652        type = mc->kvm_type(kvm_type);
1653    } else if (kvm_type) {
1654        ret = -EINVAL;
1655        fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
1656        goto err;
1657    }
1658
1659    do {
1660        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
1661    } while (ret == -EINTR);
1662
1663    if (ret < 0) {
1664        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
1665                strerror(-ret));
1666
1667#ifdef TARGET_S390X
1668        if (ret == -EINVAL) {
1669            fprintf(stderr,
1670                    "Host kernel setup problem detected. Please verify:\n");
1671            fprintf(stderr, "- for kernels supporting the switch_amode or"
1672                    " user_mode parameters, whether\n");
1673            fprintf(stderr,
1674                    "  user space is running in primary address space\n");
1675            fprintf(stderr,
1676                    "- for kernels supporting the vm.allocate_pgste sysctl, "
1677                    "whether it is enabled\n");
1678        }
1679#endif
1680        goto err;
1681    }
1682
1683    s->vmfd = ret;
1684    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
1685    if (!missing_cap) {
1686        missing_cap =
1687            kvm_check_extension_list(s, kvm_arch_required_capabilities);
1688    }
1689    if (missing_cap) {
1690        ret = -EINVAL;
1691        fprintf(stderr, "kvm does not support %s\n%s",
1692                missing_cap->name, upgrade_note);
1693        goto err;
1694    }
1695
1696    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
1697
1698    s->broken_set_mem_region = 1;
1699    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
1700    if (ret > 0) {
1701        s->broken_set_mem_region = 0;
1702    }
1703
1704#ifdef KVM_CAP_VCPU_EVENTS
1705    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
1706#endif
1707
1708    s->robust_singlestep =
1709        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
1710
1711#ifdef KVM_CAP_DEBUGREGS
1712    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
1713#endif
1714
1715#ifdef KVM_CAP_IRQ_ROUTING
1716    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
1717#endif
1718
1719    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
1720
1721    s->irq_set_ioctl = KVM_IRQ_LINE;
1722    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
1723        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
1724    }
1725
1726#ifdef KVM_CAP_READONLY_MEM
1727    kvm_readonly_mem_allowed =
1728        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
1729#endif
1730
1731    kvm_eventfds_allowed =
1732        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
1733
1734    kvm_irqfds_allowed =
1735        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);
1736
1737    kvm_resamplefds_allowed =
1738        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);
1739
1740    kvm_vm_attributes_allowed =
1741        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
1742
1743    kvm_ioeventfd_any_length_allowed =
1744        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
1745
1746    ret = kvm_arch_init(ms, s);
1747    if (ret < 0) {
1748        goto err;
1749    }
1750
1751    if (machine_kernel_irqchip_allowed(ms)) {
1752        kvm_irqchip_create(ms, s);
1753    }
1754
1755    kvm_state = s;
1756
1757    if (kvm_eventfds_allowed) {
1758        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
1759        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
1760    }
1761    s->memory_listener.listener.coalesced_mmio_add = kvm_coalesce_mmio_region;
1762    s->memory_listener.listener.coalesced_mmio_del = kvm_uncoalesce_mmio_region;
1763
1764    kvm_memory_listener_register(s, &s->memory_listener,
1765                                 &address_space_memory, 0);
1766    memory_listener_register(&kvm_io_listener,
1767                             &address_space_io);
1768
1769    s->many_ioeventfds = kvm_check_many_ioeventfds();
1770
1771    cpu_interrupt_handler = kvm_handle_interrupt;
1772
1773    return 0;
1774
1775err:
1776    assert(ret < 0);
1777    if (s->vmfd >= 0) {
1778        close(s->vmfd);
1779    }
1780    if (s->fd != -1) {
1781        close(s->fd);
1782    }
1783    g_free(s->memory_listener.slots);
1784
1785    return ret;
1786}
1787
1788void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
1789{
1790    s->sigmask_len = sigmask_len;
1791}
1792
1793static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
1794                          int size, uint32_t count)
1795{
1796    int i;
1797    uint8_t *ptr = data;
1798
1799    for (i = 0; i < count; i++) {
1800        address_space_rw(&address_space_io, port, attrs,
1801                         ptr, size,
1802                         direction == KVM_EXIT_IO_OUT);
1803        ptr += size;
1804    }
1805}
1806
1807static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
1808{
1809    fprintf(stderr, "KVM internal error. Suberror: %d\n",
1810            run->internal.suberror);
1811
1812    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
1813        int i;
1814
1815        for (i = 0; i < run->internal.ndata; ++i) {
1816            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
1817                    i, (uint64_t)run->internal.data[i]);
1818        }
1819    }
1820    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
1821        fprintf(stderr, "emulation failure\n");
1822        if (!kvm_arch_stop_on_emulation_error(cpu)) {
1823            cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
1824            return EXCP_INTERRUPT;
1825        }
1826    }
1827    /* FIXME: Should trigger a QMP event to let management know
1828     * something went wrong.
1829     */
1830    return -1;
1831}
1832
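/*
 * Drain the coalesced MMIO ring shared with the kernel: KVM appends write
 * records at "last", we consume from "first" and replay each buffered write
 * into the guest address space.  The coalesced_flush_in_progress flag guards
 * against recursion, because replaying a write can itself end up flushing
 * the buffer again.
 */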
1833void kvm_flush_coalesced_mmio_buffer(void)
1834{
1835    KVMState *s = kvm_state;
1836
1837    if (s->coalesced_flush_in_progress) {
1838        return;
1839    }
1840
1841    s->coalesced_flush_in_progress = true;
1842
1843    if (s->coalesced_mmio_ring) {
1844        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
1845        while (ring->first != ring->last) {
1846            struct kvm_coalesced_mmio *ent;
1847
1848            ent = &ring->coalesced_mmio[ring->first];
1849
1850            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
1851            smp_wmb();
1852            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
1853        }
1854    }
1855
1856    s->coalesced_flush_in_progress = false;
1857}
1858
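/*
 * Register synchronization helpers: cpu->kvm_vcpu_dirty records whose copy
 * of the vCPU register state is authoritative.  While it is false the kernel
 * owns the state and it must be fetched before QEMU inspects it; once it is
 * true QEMU's copy wins and is written back to the kernel before the next
 * KVM_RUN (see kvm_cpu_exec() below).
 */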
1859static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
1860{
1861    if (!cpu->kvm_vcpu_dirty) {
1862        kvm_arch_get_registers(cpu);
1863        cpu->kvm_vcpu_dirty = true;
1864    }
1865}
1866
1867void kvm_cpu_synchronize_state(CPUState *cpu)
1868{
1869    if (!cpu->kvm_vcpu_dirty) {
1870        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
1871    }
1872}
1873
1874static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
1875{
1876    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
1877    cpu->kvm_vcpu_dirty = false;
1878}
1879
1880void kvm_cpu_synchronize_post_reset(CPUState *cpu)
1881{
1882    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
1883}
1884
1885static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
1886{
1887    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
1888    cpu->kvm_vcpu_dirty = false;
1889}
1890
1891void kvm_cpu_synchronize_post_init(CPUState *cpu)
1892{
1893    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
1894}
1895
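/*
 * Outer vCPU execution loop: run without the iothread mutex, write back any
 * dirty register state, enter the kernel with KVM_RUN and then dispatch on
 * run->exit_reason.  The loop continues while a handler returns 0 and stops
 * when a handler returns an EXCP_* value to hand control back to the caller,
 * or a negative value on an unrecoverable error, in which case the VM is
 * stopped.
 */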
1896int kvm_cpu_exec(CPUState *cpu)
1897{
1898    struct kvm_run *run = cpu->kvm_run;
1899    int ret, run_ret;
1900
1901    DPRINTF("kvm_cpu_exec()\n");
1902
1903    if (kvm_arch_process_async_events(cpu)) {
1904        cpu->exit_request = 0;
1905        return EXCP_HLT;
1906    }
1907
1908    qemu_mutex_unlock_iothread();
1909
1910    do {
1911        MemTxAttrs attrs;
1912
1913        if (cpu->kvm_vcpu_dirty) {
1914            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
1915            cpu->kvm_vcpu_dirty = false;
1916        }
1917
1918        kvm_arch_pre_run(cpu, run);
1919        if (cpu->exit_request) {
1920            DPRINTF("interrupt exit requested\n");
1921            /*
1922             * KVM requires us to reenter the kernel after IO exits to complete
1923             * instruction emulation. This self-signal will ensure that we
1924             * leave ASAP again.
1925             */
1926            qemu_cpu_kick_self();
1927        }
1928
1929        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
1930
1931        attrs = kvm_arch_post_run(cpu, run);
1932
1933        if (run_ret < 0) {
1934            if (run_ret == -EINTR || run_ret == -EAGAIN) {
1935                DPRINTF("io window exit\n");
1936                ret = EXCP_INTERRUPT;
1937                break;
1938            }
1939            fprintf(stderr, "error: kvm run failed %s\n",
1940                    strerror(-run_ret));
1941#ifdef TARGET_PPC
1942            if (run_ret == -EBUSY) {
1943                fprintf(stderr,
1944                        "This is probably because SMT is enabled.\n"
1945                        "VCPUs can only run on primary threads with all "
1946                        "secondary threads offline.\n");
1947            }
1948#endif
1949            ret = -1;
1950            break;
1951        }
1952
1953        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
1954        switch (run->exit_reason) {
1955        case KVM_EXIT_IO:
1956            DPRINTF("handle_io\n");
1957            /* Called outside BQL */
1958            kvm_handle_io(run->io.port, attrs,
1959                          (uint8_t *)run + run->io.data_offset,
1960                          run->io.direction,
1961                          run->io.size,
1962                          run->io.count);
1963            ret = 0;
1964            break;
1965        case KVM_EXIT_MMIO:
1966            DPRINTF("handle_mmio\n");
1967            /* Called outside BQL */
1968            address_space_rw(&address_space_memory,
1969                             run->mmio.phys_addr, attrs,
1970                             run->mmio.data,
1971                             run->mmio.len,
1972                             run->mmio.is_write);
1973            ret = 0;
1974            break;
1975        case KVM_EXIT_IRQ_WINDOW_OPEN:
1976            DPRINTF("irq_window_open\n");
1977            ret = EXCP_INTERRUPT;
1978            break;
1979        case KVM_EXIT_SHUTDOWN:
1980            DPRINTF("shutdown\n");
1981            qemu_system_reset_request();
1982            ret = EXCP_INTERRUPT;
1983            break;
1984        case KVM_EXIT_UNKNOWN:
1985            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
1986                    (uint64_t)run->hw.hardware_exit_reason);
1987            ret = -1;
1988            break;
1989        case KVM_EXIT_INTERNAL_ERROR:
1990            ret = kvm_handle_internal_error(cpu, run);
1991            break;
1992        case KVM_EXIT_SYSTEM_EVENT:
1993            switch (run->system_event.type) {
1994            case KVM_SYSTEM_EVENT_SHUTDOWN:
1995                qemu_system_shutdown_request();
1996                ret = EXCP_INTERRUPT;
1997                break;
1998            case KVM_SYSTEM_EVENT_RESET:
1999                qemu_system_reset_request();
2000                ret = EXCP_INTERRUPT;
2001                break;
2002            case KVM_SYSTEM_EVENT_CRASH:
2003                qemu_mutex_lock_iothread();
2004                qemu_system_guest_panicked();
2005                qemu_mutex_unlock_iothread();
2006                ret = 0;
2007                break;
2008            default:
2009                DPRINTF("kvm_arch_handle_exit\n");
2010                ret = kvm_arch_handle_exit(cpu, run);
2011                break;
2012            }
2013            break;
2014        default:
2015            DPRINTF("kvm_arch_handle_exit\n");
2016            ret = kvm_arch_handle_exit(cpu, run);
2017            break;
2018        }
2019    } while (ret == 0);
2020
2021    qemu_mutex_lock_iothread();
2022
2023    if (ret < 0) {
2024        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
2025        vm_stop(RUN_STATE_INTERNAL_ERROR);
2026    }
2027
2028    cpu->exit_request = 0;
2029    return ret;
2030}
2031
2032int kvm_ioctl(KVMState *s, int type, ...)
2033{
2034    int ret;
2035    void *arg;
2036    va_list ap;
2037
2038    va_start(ap, type);
2039    arg = va_arg(ap, void *);
2040    va_end(ap);
2041
2042    trace_kvm_ioctl(type, arg);
2043    ret = ioctl(s->fd, type, arg);
2044    if (ret == -1) {
2045        ret = -errno;
2046    }
2047    return ret;
2048}
2049
2050int kvm_vm_ioctl(KVMState *s, int type, ...)
2051{
2052    int ret;
2053    void *arg;
2054    va_list ap;
2055
2056    va_start(ap, type);
2057    arg = va_arg(ap, void *);
2058    va_end(ap);
2059
2060    trace_kvm_vm_ioctl(type, arg);
2061    ret = ioctl(s->vmfd, type, arg);
2062    if (ret == -1) {
2063        ret = -errno;
2064    }
2065    return ret;
2066}
2067
2068int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
2069{
2070    int ret;
2071    void *arg;
2072    va_list ap;
2073
2074    va_start(ap, type);
2075    arg = va_arg(ap, void *);
2076    va_end(ap);
2077
2078    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
2079    ret = ioctl(cpu->kvm_fd, type, arg);
2080    if (ret == -1) {
2081        ret = -errno;
2082    }
2083    return ret;
2084}
2085
2086int kvm_device_ioctl(int fd, int type, ...)
2087{
2088    int ret;
2089    void *arg;
2090    va_list ap;
2091
2092    va_start(ap, type);
2093    arg = va_arg(ap, void *);
2094    va_end(ap);
2095
2096    trace_kvm_device_ioctl(fd, type, arg);
2097    ret = ioctl(fd, type, arg);
2098    if (ret == -1) {
2099        ret = -errno;
2100    }
2101    return ret;
2102}
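
/*
 * The four wrappers above differ only in the file descriptor they target:
 * kvm_ioctl() uses the system fd, kvm_vm_ioctl() the VM fd, kvm_vcpu_ioctl()
 * the per-vCPU fd and kvm_device_ioctl() an in-kernel device fd.  Each one
 * converts ioctl(2)'s -1/errno convention into a negative errno return
 * value.  For illustration, querying the API version might look like:
 *
 *     int ver = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
 *     if (ver < 0) {
 *         ... ver holds -errno ...
 *     }
 */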
2103
2104int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
2105{
2106    int ret;
2107    struct kvm_device_attr attribute = {
2108        .group = group,
2109        .attr = attr,
2110    };
2111
2112    if (!kvm_vm_attributes_allowed) {
2113        return 0;
2114    }
2115
2116    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
2117    /* KVM returns 0 on success for KVM_HAS_DEVICE_ATTR */
2118    return ret ? 0 : 1;
2119}
2120
2121int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
2122{
2123    struct kvm_device_attr attribute = {
2124        .group = group,
2125        .attr = attr,
2126        .flags = 0,
2127    };
2128
2129    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
2130}
2131
2132void kvm_device_access(int fd, int group, uint64_t attr,
2133                       void *val, bool write)
2134{
2135    struct kvm_device_attr kvmattr;
2136    int err;
2137
2138    kvmattr.flags = 0;
2139    kvmattr.group = group;
2140    kvmattr.attr = attr;
2141    kvmattr.addr = (uintptr_t)val;
2142
2143    err = kvm_device_ioctl(fd,
2144                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
2145                           &kvmattr);
2146    if (err < 0) {
2147        error_report("KVM_%s_DEVICE_ATTR failed: %s",
2148                     write ? "SET" : "GET", strerror(-err));
2149        error_printf("Group %d attr 0x%016" PRIx64 "\n", group, attr);
2150        abort();
2151    }
2152}
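
/*
 * Illustrative sketch of the attribute helpers above (the group/attr values
 * and "group_fd" are examples, not taken from this file): a caller adding a
 * VFIO group to the kvm-vfio device might do
 *
 *     if (kvm_device_check_attr(dev_fd, KVM_DEV_VFIO_GROUP,
 *                               KVM_DEV_VFIO_GROUP_ADD)) {
 *         kvm_device_access(dev_fd, KVM_DEV_VFIO_GROUP,
 *                           KVM_DEV_VFIO_GROUP_ADD, &group_fd, true);
 *     }
 *
 * kvm_device_access() aborts on failure, so it is only suitable for
 * attributes that are known to be supported.
 */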
2153
2154/* Return 1 on success, 0 on failure */
2155int kvm_has_sync_mmu(void)
2156{
2157    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2158}
2159
2160int kvm_has_vcpu_events(void)
2161{
2162    return kvm_state->vcpu_events;
2163}
2164
2165int kvm_has_robust_singlestep(void)
2166{
2167    return kvm_state->robust_singlestep;
2168}
2169
2170int kvm_has_debugregs(void)
2171{
2172    return kvm_state->debugregs;
2173}
2174
2175int kvm_has_many_ioeventfds(void)
2176{
2177    if (!kvm_enabled()) {
2178        return 0;
2179    }
2180    return kvm_state->many_ioeventfds;
2181}
2182
2183int kvm_has_gsi_routing(void)
2184{
2185#ifdef KVM_CAP_IRQ_ROUTING
2186    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
2187#else
2188    return false;
2189#endif
2190}
2191
2192int kvm_has_intx_set_mask(void)
2193{
2194    return kvm_state->intx_set_mask;
2195}
2196
2197#ifdef KVM_CAP_SET_GUEST_DEBUG
2198struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
2199                                                 target_ulong pc)
2200{
2201    struct kvm_sw_breakpoint *bp;
2202
2203    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
2204        if (bp->pc == pc) {
2205            return bp;
2206        }
2207    }
2208    return NULL;
2209}
2210
2211int kvm_sw_breakpoints_active(CPUState *cpu)
2212{
2213    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
2214}
2215
2216struct kvm_set_guest_debug_data {
2217    struct kvm_guest_debug dbg;
2218    int err;
2219};
2220
2221static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
2222{
2223    struct kvm_set_guest_debug_data *dbg_data =
2224        (struct kvm_set_guest_debug_data *) data.host_ptr;
2225
2226    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
2227                                   &dbg_data->dbg);
2228}
2229
2230int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2231{
2232    struct kvm_set_guest_debug_data data;
2233
2234    data.dbg.control = reinject_trap;
2235
2236    if (cpu->singlestep_enabled) {
2237        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
2238    }
2239    kvm_arch_update_guest_debug(cpu, &data.dbg);
2240
2241    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
2242               RUN_ON_CPU_HOST_PTR(&data));
2243    return data.err;
2244}
2245
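/*
 * Breakpoint management: software breakpoints are reference counted in the
 * per-VM kvm_sw_breakpoints list and installed by the architecture hook
 * (typically by patching a trap instruction into guest memory); hardware
 * breakpoints are delegated entirely to the architecture code.  In both
 * cases the guest debug state is refreshed on every vCPU afterwards.
 */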
2246int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2247                          target_ulong len, int type)
2248{
2249    struct kvm_sw_breakpoint *bp;
2250    int err;
2251
2252    if (type == GDB_BREAKPOINT_SW) {
2253        bp = kvm_find_sw_breakpoint(cpu, addr);
2254        if (bp) {
2255            bp->use_count++;
2256            return 0;
2257        }
2258
2259        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
2260        bp->pc = addr;
2261        bp->use_count = 1;
2262        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
2263        if (err) {
2264            g_free(bp);
2265            return err;
2266        }
2267
2268        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2269    } else {
2270        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
2271        if (err) {
2272            return err;
2273        }
2274    }
2275
2276    CPU_FOREACH(cpu) {
2277        err = kvm_update_guest_debug(cpu, 0);
2278        if (err) {
2279            return err;
2280        }
2281    }
2282    return 0;
2283}
2284
2285int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2286                          target_ulong len, int type)
2287{
2288    struct kvm_sw_breakpoint *bp;
2289    int err;
2290
2291    if (type == GDB_BREAKPOINT_SW) {
2292        bp = kvm_find_sw_breakpoint(cpu, addr);
2293        if (!bp) {
2294            return -ENOENT;
2295        }
2296
2297        if (bp->use_count > 1) {
2298            bp->use_count--;
2299            return 0;
2300        }
2301
2302        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
2303        if (err) {
2304            return err;
2305        }
2306
2307        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2308        g_free(bp);
2309    } else {
2310        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
2311        if (err) {
2312            return err;
2313        }
2314    }
2315
2316    CPU_FOREACH(cpu) {
2317        err = kvm_update_guest_debug(cpu, 0);
2318        if (err) {
2319            return err;
2320        }
2321    }
2322    return 0;
2323}
2324
2325void kvm_remove_all_breakpoints(CPUState *cpu)
2326{
2327    struct kvm_sw_breakpoint *bp, *next;
2328    KVMState *s = cpu->kvm_state;
2329    CPUState *tmpcpu;
2330
2331    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
2332        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
2333            /* Try harder to find a CPU that currently sees the breakpoint. */
2334            CPU_FOREACH(tmpcpu) {
2335                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
2336                    break;
2337                }
2338            }
2339        }
2340        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
2341        g_free(bp);
2342    }
2343    kvm_arch_remove_all_hw_breakpoints();
2344
2345    CPU_FOREACH(cpu) {
2346        kvm_update_guest_debug(cpu, 0);
2347    }
2348}
2349
2350#else /* !KVM_CAP_SET_GUEST_DEBUG */
2351
2352int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2353{
2354    return -EINVAL;
2355}
2356
2357int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2358                          target_ulong len, int type)
2359{
2360    return -EINVAL;
2361}
2362
2363int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2364                          target_ulong len, int type)
2365{
2366    return -EINVAL;
2367}
2368
2369void kvm_remove_all_breakpoints(CPUState *cpu)
2370{
2371}
2372#endif /* !KVM_CAP_SET_GUEST_DEBUG */
2373
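/*
 * Install the signal mask that KVM atomically applies while this vCPU sits
 * in KVM_RUN; passing a NULL sigset clears any previously installed mask.
 */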
2374int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
2375{
2376    KVMState *s = kvm_state;
2377    struct kvm_signal_mask *sigmask;
2378    int r;
2379
2380    if (!sigset) {
2381        return kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, NULL);
2382    }
2383
2384    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
2385
2386    sigmask->len = s->sigmask_len;
2387    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
2388    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
2389    g_free(sigmask);
2390
2391    return r;
2392}
2393int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
2394{
2395    return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
2396}
2397
2398int kvm_on_sigbus(int code, void *addr)
2399{
2400    return kvm_arch_on_sigbus(code, addr);
2401}
2402
2403int kvm_create_device(KVMState *s, uint64_t type, bool test)
2404{
2405    int ret;
2406    struct kvm_create_device create_dev;
2407
2408    create_dev.type = type;
2409    create_dev.fd = -1;
2410    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
2411
2412    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
2413        return -ENOTSUP;
2414    }
2415
2416    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
2417    if (ret) {
2418        return ret;
2419    }
2420
2421    return test ? 0 : create_dev.fd;
2422}
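
/*
 * Illustrative sketch (KVM_DEV_TYPE_VFIO is just an example device type):
 * with test=true the ioctl only probes for support and the function returns
 * 0 on success, while test=false returns the new device fd:
 *
 *     if (kvm_create_device(kvm_state, KVM_DEV_TYPE_VFIO, true) == 0) {
 *         int dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_VFIO, false);
 *         ...
 *     }
 */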
2423
2424bool kvm_device_supported(int vmfd, uint64_t type)
2425{
2426    struct kvm_create_device create_dev = {
2427        .type = type,
2428        .fd = -1,
2429        .flags = KVM_CREATE_DEVICE_TEST,
2430    };
2431
2432    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
2433        return false;
2434    }
2435
2436    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
2437}
2438
2439int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
2440{
2441    struct kvm_one_reg reg;
2442    int r;
2443
2444    reg.id = id;
2445    reg.addr = (uintptr_t) source;
2446    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
2447    if (r) {
2448        trace_kvm_failed_reg_set(id, strerror(-r));
2449    }
2450    return r;
2451}
2452
2453int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
2454{
2455    struct kvm_one_reg reg;
2456    int r;
2457
2458    reg.id = id;
2459    reg.addr = (uintptr_t) target;
2460    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
2461    if (r) {
2462        trace_kvm_failed_reg_get(id, strerror(-r));
2463    }
2464    return r;
2465}
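
/*
 * Illustrative sketch of the ONE_REG accessors above; the register id
 * encoding is architecture specific, so "reg_id" and "SOME_FLAG" are
 * placeholders rather than real definitions:
 *
 *     uint64_t val;
 *     if (kvm_get_one_reg(cs, reg_id, &val) == 0) {
 *         val |= SOME_FLAG;
 *         kvm_set_one_reg(cs, reg_id, &val);
 *     }
 */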
2466
2467static void kvm_accel_class_init(ObjectClass *oc, void *data)
2468{
2469    AccelClass *ac = ACCEL_CLASS(oc);
2470    ac->name = "KVM";
2471    ac->init_machine = kvm_init;
2472    ac->allowed = &kvm_allowed;
2473}
2474
2475static const TypeInfo kvm_accel_type = {
2476    .name = TYPE_KVM_ACCEL,
2477    .parent = TYPE_ACCEL,
2478    .class_init = kvm_accel_class_init,
2479    .instance_size = sizeof(KVMState),
2480};
2481
2482static void kvm_type_init(void)
2483{
2484    type_register_static(&kvm_accel_type);
2485}
2486
2487type_init(kvm_type_init);
2488