qemu/target-arm/kvm64.c
/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "config-host.h"
#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"

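/* Set a single ARM_FEATURE_* bit in the accumulated feature word. */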
static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     * For AArch64 we currently don't care about ID registers at
     * all; we just want to know the CPU type.
     */
    int fdarray[3];
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;
    ahcc->dtb_compatible = "arm,arm-v8";

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);

    ahcc->features = features;

    return true;
}

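/* Initialize one vcpu: choose the init features, issue KVM_ARM_VCPU_INIT,
 * and then read back the list of coprocessor/system registers that KVM
 * exposes for this vcpu.
 */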
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
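    /* A guest CPU without ARM_FEATURE_AARCH64 wants to run in AArch32
     * state, so ask KVM to start this vcpu as a 32-bit EL1 guest.
     */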
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    return kvm_arm_init_cpreg_list(cpu);
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
        return false;
    default:
        return true;
    }
}

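/* Build register IDs for the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls:
 * an AArch64 core register of the given access size, identified by the
 * offset of field x within the kernel's struct kvm_regs.
 */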
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint32_t fpr;
    uint64_t val;
    int i;
    int ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

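    /* General purpose registers: regs.regs[0..30] hold X0..X30 (xregs[]) */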
    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        int rd = i << 1;
        uint64_t fp_val[2];
#ifdef HOST_WORDS_BIGENDIAN
        fp_val[0] = env->vfp.regs[rd + 1];
        fp_val[1] = env->vfp.regs[rd];
#else
        fp_val[1] = env->vfp.regs[rd + 1];
        fp_val[0] = env->vfp.regs[rd];
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    if (!write_list_to_kvmstate(cpu)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

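/* Fetch the vcpu state from KVM into CPUARMState; the mirror image of
 * kvm_arch_put_registers() above.
 */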
int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    unsigned int el;
    int i;
    int ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

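    /* The nRW bit in PSTATE is clear when the vcpu is in AArch64 state
     * and set when it is in AArch32 state; use it to pick the register
     * format we write back.
     */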
    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        env->uncached_cpsr = val & CPSR_M;
        cpsr_write(env, val, 0xffffffff);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        uint64_t fp_val[2];
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
            int rd = i << 1;
#ifdef HOST_WORDS_BIGENDIAN
            env->vfp.regs[rd + 1] = fp_val[0];
            env->vfp.regs[rd] = fp_val[1];
#else
            env->vfp.regs[rd + 1] = fp_val[1];
            env->vfp.regs[rd] = fp_val[0];
#endif
        }
    }

    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}