qemu/target-arm/kvm64.c
/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "config-host.h"
#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"

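/* Set a single ARM_FEATURE_* bit in a 64-bit feature mask. */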
static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     * For AArch64 we currently don't care about ID registers at
     * all; we just want to know the CPU type.
     */
    int fdarray[3];
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;
    ahcc->dtb_compatible = "arm,arm-v8";

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    /* We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    set_feature(&features, ARM_FEATURE_V8);
    set_feature(&features, ARM_FEATURE_VFP4);
    set_feature(&features, ARM_FEATURE_NEON);
    set_feature(&features, ARM_FEATURE_AARCH64);

    ahcc->features = features;

    return true;
}

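/* Mask of the MPIDR_EL1 affinity fields (Aff3 and Aff2..Aff0), and the
 * (op0, op1, CRn, CRm, op2) system-register encoding of MPIDR_EL1 for
 * use with ARM64_SYS_REG().
 */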
#define ARM_MPIDR_HWID_BITMASK 0xFF00FFFFFFULL
#define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5

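/* Initialize the vcpu in the kernel: check that the requested CPU type is
 * something KVM can run, select the init features, issue KVM_ARM_VCPU_INIT
 * and pick up the MPIDR value that KVM assigned to this vcpu.
 */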
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM_MPIDR_HWID_BITMASK;

    return kvm_arm_init_cpreg_list(cpu);
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
        return false;
    default:
        return true;
    }
}

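/* Pairs a KVM register index with the minimum write-back level
 * (KVM_PUT_*_STATE) at which it must be pushed to the kernel.
 */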
typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

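/* Return the level at which regidx must be written back to KVM, defaulting
 * to KVM_PUT_RUNTIME_STATE for anything not in non_runtime_cpregs[].
 */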
int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

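/* Build KVM_{GET,SET}_ONE_REG ids for core registers (fields of struct
 * kvm_regs), at 64-bit, 128-bit and 32-bit access sizes respectively.
 */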
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

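/* Push QEMU's CPU state out to KVM. Core registers are transferred one at
 * a time with KVM_SET_ONE_REG (reg.id selects the register, reg.addr points
 * at a userspace buffer of the size encoded in the id); everything else is
 * synchronized via the cpreg list.
 */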
int kvm_arch_put_registers(CPUState *cs, int level)
{
    struct kvm_one_reg reg;
    uint32_t fpr;
    uint64_t val;
    int i;
    int ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        int rd = i << 1;
        uint64_t fp_val[2];
#ifdef HOST_WORDS_BIGENDIAN
        fp_val[0] = env->vfp.regs[rd + 1];
        fp_val[1] = env->vfp.regs[rd];
#else
        fp_val[1] = env->vfp.regs[rd + 1];
        fp_val[0] = env->vfp.regs[rd];
#endif
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

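    /* FP control/status: FPSR and FPCR are 32-bit registers, so they are
     * staged through the 32-bit fpr temporary.
     */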
    reg.addr = (uintptr_t)(&fpr);
    fpr = vfp_get_fpsr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

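/* Pull the vcpu state back from KVM into QEMU's CPUARMState: core registers
 * by hand with KVM_GET_ONE_REG, everything else via the cpreg list.
 */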
int kvm_arch_get_registers(CPUState *cs)
{
    struct kvm_one_reg reg;
    uint64_t val;
    uint32_t fpr;
    unsigned int el;
    int i;
    int ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        reg.id = AARCH64_CORE_REG(regs.regs[i]);
        reg.addr = (uintptr_t) &env->xregs[i];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    reg.id = AARCH64_CORE_REG(regs.sp);
    reg.addr = (uintptr_t) &env->sp_el[0];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(sp_el1);
    reg.addr = (uintptr_t) &env->sp_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    reg.id = AARCH64_CORE_REG(regs.pstate);
    reg.addr = (uintptr_t) &val;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

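    /* PSTATE.nRW is clear when the vcpu was executing in AArch64 state;
     * use that to decide whether to restore via pstate_write() or
     * cpsr_write().
     */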
    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        env->uncached_cpsr = val & CPSR_M;
        cpsr_write(env, val, 0xffffffff);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    reg.id = AARCH64_CORE_REG(regs.pc);
    reg.addr = (uintptr_t) &env->pc;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    reg.id = AARCH64_CORE_REG(elr_el1);
    reg.addr = (uintptr_t) &env->elr_el[1];
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        reg.id = AARCH64_CORE_REG(spsr[i]);
        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    /* Advanced SIMD and FP registers
     * We map Qn = regs[2n+1]:regs[2n]
     */
    for (i = 0; i < 32; i++) {
        uint64_t fp_val[2];
        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
        reg.addr = (uintptr_t)(&fp_val);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret) {
            return ret;
        } else {
            int rd = i << 1;
#ifdef HOST_WORDS_BIGENDIAN
            env->vfp.regs[rd + 1] = fp_val[0];
            env->vfp.regs[rd] = fp_val[1];
#else
            env->vfp.regs[rd + 1] = fp_val[1];
            env->vfp.regs[rd] = fp_val[0];
#endif
        }
    }

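    /* FPSR and FPCR come back as 32-bit values and are written into the
     * softfloat state with vfp_set_fpsr()/vfp_set_fpcr().
     */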
    reg.addr = (uintptr_t)(&fpr);
    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}