qemu/target/arm/kvm.c
/*
 * ARM implementation of KVM hooks
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "trace.h"
#include "internals.h"
#include "hw/arm/arm.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "qemu/log.h"

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static bool cap_has_mp_state;
static bool cap_has_inject_serror_esr;

static ARMHostCPUFeatures arm_host_cpu_features;

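/*
 * Issue KVM_ARM_VCPU_INIT for @cs using the target and feature bits
 * selected when the CPU model was chosen. This is also how
 * kvm_arm_reset_vcpu() below puts the VCPU back into its reset state.
 */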
int kvm_arm_vcpu_init(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    struct kvm_vcpu_init init;

    init.target = cpu->kvm_target;
    memcpy(init.features, cpu->kvm_init_features, sizeof(init.features));

    return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
}

void kvm_arm_init_serror_injection(CPUState *cs)
{
    cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
                                    KVM_CAP_ARM_INJECT_SERROR_ESR);
}

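/*
 * Create a "scratch" VM with a single VCPU, used for probing what the
 * host kernel supports without disturbing the real VM. On success the
 * three file descriptors are returned as fdarray[0] = kvmfd,
 * fdarray[1] = vmfd, fdarray[2] = cpufd; the caller must dispose of
 * them with kvm_arm_destroy_scratch_host_vcpu().
 */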
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                      int *fdarray,
                                      struct kvm_vcpu_init *init)
{
    int ret, kvmfd = -1, vmfd = -1, cpufd = -1;

    kvmfd = qemu_open("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    if (!init) {
        /* Caller doesn't want the VCPU to be initialized, so skip it */
        goto finish;
    }

    ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, init);
    if (ret >= 0) {
        ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
        if (ret < 0) {
            goto err;
        }
    } else if (cpus_to_try) {
        /* Old kernel which doesn't know about the
         * PREFERRED_TARGET ioctl: we know it will only support
         * creating one kind of guest CPU which is its preferred
         * CPU type.
         */
        while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
            init->target = *cpus_to_try++;
            memset(init->features, 0, sizeof(init->features));
            ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
            if (ret >= 0) {
                break;
            }
        }
        if (ret < 0) {
            goto err;
        }
    } else {
        /* Treat a NULL cpus_to_try argument the same as an empty
         * list, which means we will fail the call since this must
         * be an old kernel which doesn't support PREFERRED_TARGET.
         */
        goto err;
    }

finish:
    fdarray[0] = kvmfd;
    fdarray[1] = vmfd;
    fdarray[2] = cpufd;

    return true;

err:
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}

void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
{
    int i;

    for (i = 2; i >= 0; i--) {
        close(fdarray[i]);
    }
}

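/*
 * Probe the host CPU once and cache the result in the static
 * arm_host_cpu_features, so that subsequent VCPUs reuse the cached
 * values instead of repeating the probe.
 */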
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;

    if (!arm_host_cpu_features.dtb_compatible) {
        if (!kvm_enabled() ||
            !kvm_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /* We can't report this error yet, so flag that we need to
             * in arm_cpu_realizefn().
             */
            cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->kvm_target = arm_host_cpu_features.target;
    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    env->features = arm_host_cpu_features.features;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    /* For ARM, interrupt delivery is always asynchronous,
     * whether we are using an in-kernel VGIC or not.
     */
    kvm_async_interrupts_allowed = true;

    /*
     * PSCI wakes up secondary cores, so we always need to
     * have vCPUs waiting in kernel space
     */
    kvm_halt_in_kernel_allowed = true;

    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);

    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

/* We track all the KVM devices which need their memory addresses
 * passed to the kernel in a list of these structures.
 * When board init is complete we run through the list and
 * tell the kernel the base addresses of the memory regions.
 * We use a MemoryListener to track mapping and unmapping of
 * the regions during board creation, so the board models don't
 * need to do anything special for the KVM case.
 *
 * Sometimes the address must be OR'ed with some other fields
 * (for example for KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION).
 * @kda_addr_ormask stores the value of those fields.
 */
typedef struct KVMDevice {
    struct kvm_arm_device_addr kda;
    struct kvm_device_attr kdattr;
    uint64_t kda_addr_ormask;
    MemoryRegion *mr;
    QSLIST_ENTRY(KVMDevice) entries;
    int dev_fd;
} KVMDevice;

static QSLIST_HEAD(kvm_devices_head, KVMDevice) kvm_devices_head;

static void kvm_arm_devlistener_add(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = section->offset_within_address_space;
        }
    }
}

static void kvm_arm_devlistener_del(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = -1;
        }
    }
}

static MemoryListener devlistener = {
    .region_add = kvm_arm_devlistener_add,
    .region_del = kvm_arm_devlistener_del,
};

static void kvm_arm_set_device_addr(KVMDevice *kd)
{
    struct kvm_device_attr *attr = &kd->kdattr;
    int ret;

    /* If the device control API is available and we have a device fd on the
     * KVMDevice struct, let's use the newer API
     */
    if (kd->dev_fd >= 0) {
        uint64_t addr = kd->kda.addr;

        addr |= kd->kda_addr_ormask;
        attr->addr = (uintptr_t)&addr;
        ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
    } else {
        ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda);
    }

    if (ret < 0) {
        fprintf(stderr, "Failed to set device address: %s\n",
                strerror(-ret));
        abort();
    }
}

static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
{
    KVMDevice *kd, *tkd;

    QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
        if (kd->kda.addr != -1) {
            kvm_arm_set_device_addr(kd);
        }
        memory_region_unref(kd->mr);
        QSLIST_REMOVE_HEAD(&kvm_devices_head, entries);
        g_free(kd);
    }
    memory_listener_unregister(&devlistener);
}

static Notifier notify = {
    .notify = kvm_arm_machine_init_done,
};

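/*
 * Queue @mr so that its final guest physical address is forwarded to
 * the kernel once machine init is done. A dev_fd of -1 selects the
 * legacy KVM_ARM_SET_DEVICE_ADDR ioctl; otherwise KVM_SET_DEVICE_ATTR
 * is used on that device fd.
 */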
void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
                             uint64_t attr, int dev_fd, uint64_t addr_ormask)
{
    KVMDevice *kd;

    if (!kvm_irqchip_in_kernel()) {
        return;
    }

    if (QSLIST_EMPTY(&kvm_devices_head)) {
        memory_listener_register(&devlistener, &address_space_memory);
        qemu_add_machine_init_done_notifier(&notify);
    }
    kd = g_new0(KVMDevice, 1);
    kd->mr = mr;
    kd->kda.id = devid;
    kd->kda.addr = -1;
    kd->kdattr.flags = 0;
    kd->kdattr.group = group;
    kd->kdattr.attr = attr;
    kd->dev_fd = dev_fd;
    kd->kda_addr_ormask = addr_ormask;
    QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
    memory_region_ref(kd->mr);
}

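/*
 * qsort() comparator for uint64_t register indexes. We compare rather
 * than subtract so that the 64-bit difference is never truncated to
 * the comparator's int return type.
 */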
static int compare_u64(const void *a, const void *b)
{
    if (*(uint64_t *)a > *(uint64_t *)b) {
        return 1;
    }
    if (*(uint64_t *)a < *(uint64_t *)b) {
        return -1;
    }
    return 0;
}

/* Initialize the ARMCPU cpreg list according to the kernel's
 * definition of what CPU registers it knows about (and throw away
 * the previous TCG-created cpreg list).
 */
int kvm_arm_init_cpreg_list(ARMCPU *cpu)
{
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    int i, ret, arraylen;
    CPUState *cs = CPU(cpu);

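    /* Probe the number of registers: with rl.n == 0 the ioctl fails
     * with E2BIG and fills in the count the kernel actually needs.
     */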
    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }

    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!kvm_arm_reg_syncs_via_cpreg_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

out:
    g_free(rlp);
    return ret;
}

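/*
 * Read every register in the cpreg list out of the kernel with
 * KVM_GET_ONE_REG and store the values in cpu->cpreg_values. U32
 * registers are read through a 32-bit temporary and widened into the
 * uint64_t array. Returns false if any individual read fails.
 */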
bool write_kvmstate_to_list(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        r.id = regidx;

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            r.addr = (uintptr_t)&v32;
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            if (!ret) {
                cpu->cpreg_values[i] = v32;
            }
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
            break;
        default:
            abort();
        }
        if (ret) {
            ok = false;
        }
    }
    return ok;
}

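/*
 * Push the values in cpu->cpreg_values back into the kernel with
 * KVM_SET_ONE_REG, skipping registers whose sync level is above
 * @level. Returns false if any individual write fails.
 */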
bool write_list_to_kvmstate(ARMCPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        struct kvm_one_reg r;
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        if (kvm_arm_cpreg_level(regidx) > level) {
            continue;
        }

        r.id = regidx;
        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            v32 = cpu->cpreg_values[i];
            r.addr = (uintptr_t)&v32;
            break;
        case KVM_REG_SIZE_U64:
            r.addr = (uintptr_t)(cpu->cpreg_values + i);
            break;
        default:
            abort();
        }
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            /* We might fail because the register is unknown to the
             * kernel, or because we tried to set a constant register
             * to a value different from what it actually contains.
             */
            ok = false;
        }
    }
    return ok;
}

void kvm_arm_reset_vcpu(ARMCPU *cpu)
{
    int ret;

    /* Re-init VCPU so that all registers are set to
     * their respective reset values.
     */
    ret = kvm_arm_vcpu_init(CPU(cpu));
    if (ret < 0) {
        fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
        abort();
    }
    if (!write_kvmstate_to_list(cpu)) {
        fprintf(stderr, "write_kvmstate_to_list failed\n");
        abort();
    }
}

/*
 * Update KVM's MP_STATE based on what QEMU thinks it is
 */
int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state = {
            .mp_state = (cpu->power_state == PSCI_OFF) ?
            KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
        };
        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
        if (ret) {
            fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n",
                    __func__, ret, strerror(-ret));
            return -1;
        }
    }

    return 0;
}

/*
 * Sync the KVM MP_STATE into QEMU
 */
int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state;
        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
        if (ret) {
            fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n",
                    __func__, ret, strerror(-ret));
            abort();
        }
        cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ?
            PSCI_OFF : PSCI_ON;
    }

    return 0;
}

int kvm_put_vcpu_events(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    events.exception.serror_pending = env->serror.pending;

    /* Inject an SError into the guest with the specified syndrome if
     * the host kernel supports it, otherwise inject the SError without
     * a syndrome.
     */
    if (cap_has_inject_serror_esr) {
        events.exception.serror_has_esr = env->serror.has_esr;
        events.exception.serror_esr = env->serror.esr;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
    if (ret) {
        error_report("failed to put vcpu events");
    }

    return ret;
}

int kvm_get_vcpu_events(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret) {
        error_report("failed to get vcpu events");
        return ret;
    }

    env->serror.pending = events.exception.serror_pending;
    env->serror.has_esr = events.exception.serror_has_esr;
    env->serror.esr = events.exception.serror_esr;

    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    ARMCPU *cpu;
    uint32_t switched_level;

    if (kvm_irqchip_in_kernel()) {
        /*
         * We only need to sync timer states with user-space interrupt
         * controllers, so return early and save cycles if we are using
         * an in-kernel irqchip.
         */
        return MEMTXATTRS_UNSPECIFIED;
    }

    cpu = ARM_CPU(cs);

    /* Synchronize our shadowed in-kernel device irq lines with the kvm ones */
    if (run->s.regs.device_irq_level != cpu->device_irq_level) {
        switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;

        qemu_mutex_lock_iothread();

        if (switched_level & KVM_ARM_DEV_EL1_VTIMER) {
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
                         !!(run->s.regs.device_irq_level &
                            KVM_ARM_DEV_EL1_VTIMER));
            switched_level &= ~KVM_ARM_DEV_EL1_VTIMER;
        }

        if (switched_level & KVM_ARM_DEV_EL1_PTIMER) {
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_PHYS],
                         !!(run->s.regs.device_irq_level &
                            KVM_ARM_DEV_EL1_PTIMER));
            switched_level &= ~KVM_ARM_DEV_EL1_PTIMER;
        }

        if (switched_level & KVM_ARM_DEV_PMU) {
            qemu_set_irq(cpu->pmu_interrupt,
                         !!(run->s.regs.device_irq_level & KVM_ARM_DEV_PMU));
            switched_level &= ~KVM_ARM_DEV_PMU;
        }

        if (switched_level) {
            qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n",
                          __func__, switched_level);
        }

        /* We also mark unknown levels as processed to not waste cycles */
        cpu->device_irq_level = run->s.regs.device_irq_level;
        qemu_mutex_unlock_iothread();
    }

    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_DEBUG:
        if (kvm_arm_handle_debug(cs, &run->debug.arch)) {
            ret = EXCP_DEBUG;
        } /* otherwise return to guest */
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: unhandled exit reason %d\n",
                      __func__, run->exit_reason);
        break;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

/* The #ifdef protections are needed until the 32-bit headers are
 * imported, and can be removed once both 32-bit and 64-bit reach
 * feature parity.
 */
void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
#ifdef KVM_GUESTDBG_USE_SW_BP
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
#endif
#ifdef KVM_GUESTDBG_USE_HW
    if (kvm_arm_hw_debug_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
        kvm_arm_copy_hw_debug_data(&dbg->arch);
    }
#endif
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    if (machine_kernel_irqchip_split(ms)) {
        error_report("-machine kernel_irqchip=split is not supported on ARM");
        exit(1);
    }

    /* If we can create the VGIC using the newer device control API, we
     * let the device do this when it initializes itself, otherwise we
     * fall back to the old API
     */
    return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
}

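/*
 * Probe which in-kernel VGIC versions are available by test-creating
 * the devices (the 'true' flag asks kvm_create_device() only to test
 * for creatability). Returns 3, 2, or 0 if neither is supported.
 */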
int kvm_arm_vgic_probe(void)
{
    if (kvm_create_device(kvm_state,
                          KVM_DEV_TYPE_ARM_VGIC_V3, true) == 0) {
        return 3;
    } else if (kvm_create_device(kvm_state,
                                 KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
        return 2;
    } else {
        return 0;
    }
}

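/*
 * When the MSI doorbell sits behind an IOMMU (e.g. an ITS translation
 * register), the address the device writes to must be resolved to a
 * guest physical address before it goes into the kernel irq routing
 * table. Returns 0 on success, nonzero if no translation was found.
 */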
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    AddressSpace *as = pci_device_iommu_address_space(dev);
    hwaddr xlat, len, doorbell_gpa;
    MemoryRegionSection mrs;
    MemoryRegion *mr;
    int ret = 1;

    if (as == &address_space_memory) {
        return 0;
    }

    /* MSI doorbell address is translated by an IOMMU */

    rcu_read_lock();
    mr = address_space_translate(as, address, &xlat, &len, true,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!mr) {
        goto unlock;
    }
    mrs = memory_region_find(mr, xlat, 1);
    if (!mrs.mr) {
        goto unlock;
    }

    doorbell_gpa = mrs.offset_within_address_space;
    memory_region_unref(mrs.mr);

    route->u.msi.address_lo = doorbell_gpa;
    route->u.msi.address_hi = doorbell_gpa >> 32;

    trace_kvm_arm_fixup_msi_route(address, doorbell_gpa);

    ret = 0;

unlock:
    rcu_read_unlock();
    return ret;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

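/*
 * GIC SPIs are numbered from interrupt ID 32 upwards, so the GSI used
 * by the irq routing code is the MSI data value minus that offset.
 */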
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    return (data - 32) & 0xffff;
}