qemu/target-arm/kvm32.c
/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int i, ret, fdarray[3];
    uint32_t midr, id_pfr0, id_isar0, mvfr1;
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know such kernels only support creating one kind of guest CPU,
     * which is their preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;
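    /* One-reg descriptors for the ID registers we read from the scratch
     * vcpu: the cp15 MIDR, ID_PFR0 and ID_ISAR0 registers plus the VFP
     * MVFR1 register, each read into the matching local variable above.
     */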
    struct kvm_one_reg idregs[] = {
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 0, 0, 0),
            .addr = (uintptr_t)&midr,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 1, 0, 0),
            .addr = (uintptr_t)&id_pfr0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | ENCODE_CP_REG(15, 0, 0, 2, 0, 0),
            .addr = (uintptr_t)&id_isar0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
            | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
            .addr = (uintptr_t)&mvfr1,
        },
    };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcc->dtb_compatible = "arm,arm-v7";

    for (i = 0; i < ARRAY_SIZE(idregs); i++) {
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
        if (ret) {
            break;
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (ret) {
        return false;
    }

    /* Now we've retrieved all the register information we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM-supporting CPU is at least a v7
     * with VFPv3, LPAE and the generic timers; this in turn implies
     * most of the other feature bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_LPAE);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

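    /* ID_ISAR0 bits [27:24] are the Divide_instrs field: 1 means SDIV/UDIV
     * are implemented in Thumb state only, 2 means in ARM and Thumb state.
     */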
    switch (extract32(id_isar0, 24, 4)) {
    case 1:
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    case 2:
        set_feature(&features, ARM_FEATURE_ARM_DIV);
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    default:
        break;
    }

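    /* ID_PFR0 bits [15:12] are the State3 (ThumbEE) field; the MVFR1 fields
     * tested below report half-precision, Advanced SIMD integer and fused
     * multiply-accumulate support respectively.
     */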
    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(mvfr1, 20, 4) == 1) {
        set_feature(&features, ARM_FEATURE_VFP_FP16);
    }
    if (extract32(mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    if (extract32(mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcc->features = features;

    return true;
}

static bool reg_syncs_via_tuple_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (i.e. is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

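/* qsort() comparator for uint64_t keys. Explicit comparisons are used
 * because returning a truncated difference of two 64-bit values would
 * give wrong results.
 */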
static int compare_u64(const void *a, const void *b)
{
    if (*(uint64_t *)a > *(uint64_t *)b) {
        return 1;
    }
    if (*(uint64_t *)a < *(uint64_t *)b) {
        return -1;
    }
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    int i, ret, arraylen;
    uint64_t v;
    struct kvm_one_reg r;
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
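    /* If d31 does not exist the host core only has 16 double registers,
     * which cannot back QEMU's 32-register model, so we fail the init.
     */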
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /* Populate the cpreg list based on the kernel's idea
     * of what registers exist (and throw away the TCG-created list).
     */
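    /* First probe with n = 0: KVM_GET_REG_LIST fails with E2BIG but fills
     * in rl.n with the number of registers, which tells us how big a list
     * to allocate for the second call.
     */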
    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);

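    /* First pass: count how many of the kernel's registers we will sync
     * via the cpreg tuple list, so we know how big to make the arrays.
     */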
    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!reg_syncs_via_tuple_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }

    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!reg_syncs_via_tuple_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

out:
    g_free(rlp);
    return ret;
}

typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
#define COREREG64(KERNELNAME, QEMUFIELD)                     \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetoflow32(CPUARMState, QEMUFIELD)                \
    }
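/* offsetoflow32() yields the offset of the least-significant 32 bits of the
 * 64-bit field, so the 32-bit value exchanged with the kernel lands in the
 * low half of that field.
 */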

static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[0]),
    COREREG(usr_regs.uregs[14], banked_r14[0]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[1]),
    COREREG(svc_regs[1], banked_r14[1]),
    COREREG64(svc_regs[2], banked_spsr[1]),
    COREREG(abt_regs[0], banked_r13[2]),
    COREREG(abt_regs[1], banked_r14[2]),
    COREREG64(abt_regs[2], banked_spsr[2]),
    COREREG(und_regs[0], banked_r13[3]),
    COREREG(und_regs[1], banked_r14[3]),
    COREREG64(und_regs[2], banked_spsr[3]),
    COREREG(irq_regs[0], banked_r13[4]),
    COREREG(irq_regs[1], banked_r14[4]),
    COREREG64(irq_regs[2], banked_spsr[4]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[5]),
    COREREG(fiq_regs[6], banked_r14[5]),
    COREREG64(fiq_regs[7], banked_spsr[5]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};

int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
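    /* env->regs[] holds the live copies for the current mode, so fold them
     * back into the banked storage before writing the full state down.
     */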
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_r14[bn] = env->regs[14];
    env->banked_spsr[bn] = env->spsr;

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
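    /* QEMU keeps the CPSR split across several CPUARMState fields, so it
     * has to be assembled with cpsr_read() before being written as one
     * value.
     */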
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
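    /* d0..d31 have consecutive register IDs, so just increment r.id. */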
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* Note that we do not call write_cpustate_to_list()
     * here, so we are only writing the tuple list back to
     * KVM. This is safe because nothing can change the
     * CPUARMState cp15 fields (in particular gdb accesses cannot)
     * and so there are no changes to sync. In fact syncing would
     * be wrong at this point: for a constant register where TCG and
     * KVM disagree about its value, the preceding write_list_to_cpustate()
     * would not have had any effect on the CPUARMState value (since the
     * register is read-only), and a write_cpustate_to_list() here would
     * then try to write the TCG value back into KVM -- this would either
     * fail or incorrectly change the value the guest sees.
     *
     * If we ever want to allow the user to modify cp15 registers via
     * the gdb stub, we would need to be more clever here (for instance
     * tracking the set of registers kvm_arch_get_registers() successfully
     * managed to update the CPUARMState with, and only allowing those
     * to be written back up into the kernel).
     */
    if (!write_list_to_kvmstate(cpu)) {
        return -EINVAL;
    }

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    cpsr_write(env, cpsr, 0xffffffff);

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->regs[14] = env->banked_r14[bn];
    env->spsr = env->banked_spsr[bn];

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    return 0;
}

void kvm_arm_reset_vcpu(ARMCPU *cpu)
{
    /* Re-init VCPU so that all registers are set to
     * their respective reset values.
     */
    kvm_arm_vcpu_init(CPU(cpu));
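    /* Re-read the register list so the cached cpreg values match the
     * freshly reset state in the kernel.
     */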
    write_kvmstate_to_list(cpu);
}