qemu/target/arm/kvm32.c
/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/arm/arm.h"
#include "qemu/log.h"

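/* Set a single feature bit in the features bitmap. */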
static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

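/*
 * Read a 32-bit register from a vCPU file descriptor using the
 * KVM_GET_ONE_REG ioctl; used below to query the scratch CPU's ID registers.
 */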
static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUFeatures fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int err = 0, fdarray[3];
    uint32_t midr, id_pfr0;
    uint64_t features = 0;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know such kernels will only support creating one kind of guest
     * CPU, which is their preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcf->dtb_compatible = "arm,arm-v7";

    err |= read_sys_reg32(fdarray[2], &midr, ARM_CP15_REG32(0, 0, 0, 0));
    err |= read_sys_reg32(fdarray[2], &id_pfr0, ARM_CP15_REG32(0, 0, 1, 0));

    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                          ARM_CP15_REG32(0, 0, 2, 0));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                          ARM_CP15_REG32(0, 0, 2, 1));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                          ARM_CP15_REG32(0, 0, 2, 2));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                          ARM_CP15_REG32(0, 0, 2, 3));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                          ARM_CP15_REG32(0, 0, 2, 4));
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                          ARM_CP15_REG32(0, 0, 2, 5));
    if (read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                       ARM_CP15_REG32(0, 0, 2, 7))) {
        /*
         * Older kernels don't support reading ID_ISAR6. This register was
         * only introduced in ARMv8, so we can assume that it is zero on a
         * CPU that a kernel this old is running on.
         */
        ahcf->isar.id_isar6 = 0;
    }

    err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                          KVM_REG_ARM | KVM_REG_SIZE_U32 |
                          KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR0);
    err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                          KVM_REG_ARM | KVM_REG_SIZE_U32 |
                          KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1);
    /*
     * FIXME: There is not yet a way to read MVFR2.
     * Fortunately there is not yet anything in there that affects migration.
     */

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /* Now that we've retrieved all the register information, we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM-supporting CPU is at least a v7
     * with VFPv3, virtualization extensions, and the generic
     * timers; this in turn implies most of the other feature
     * bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7VE);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(ahcf->isar.mvfr1, 20, 4) == 1) {
        set_feature(&features, ARM_FEATURE_VFP_FP16);
    }
    if (extract32(ahcf->isar.mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    if (extract32(ahcf->isar.mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcf->features = features;

    return true;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (i.e. is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

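/*
 * Pairs a coprocessor register index with the minimum KVM_PUT_* level at
 * which it should be written back to the kernel.
 */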
typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All coprocessor registers not listed in the following table are assumed to
 * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

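/*
 * Return the state-sync level (KVM_PUT_*) at which the given coprocessor
 * register must be written back to the kernel.
 */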
int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

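/* opc1, CRn, CRm, opc2 values identifying the cp15 MPIDR register. */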
#define ARM_CPU_ID_MPIDR       0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t v;
    uint32_t mpidr;
    struct kvm_one_reg r;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by QEMU.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM_CP15_REG32(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;

    /* Check whether userspace can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

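/* One KVM register ID and the offset of its backing field in CPUARMState. */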
typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

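/* Map a 32-bit KVM core register to a uint32_t field in CPUARMState. */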
#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

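/* Map a VFP system register to its vfp.xregs[] slot in CPUARMState. */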
#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
#define COREREG64(KERNELNAME, QEMUFIELD)                     \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetoflow32(CPUARMState, QEMUFIELD)                \
    }

static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[BANK_USRSYS]),
    COREREG(usr_regs.uregs[14], banked_r14[BANK_USRSYS]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[BANK_SVC]),
    COREREG(svc_regs[1], banked_r14[BANK_SVC]),
    COREREG64(svc_regs[2], banked_spsr[BANK_SVC]),
    COREREG(abt_regs[0], banked_r13[BANK_ABT]),
    COREREG(abt_regs[1], banked_r14[BANK_ABT]),
    COREREG64(abt_regs[2], banked_spsr[BANK_ABT]),
    COREREG(und_regs[0], banked_r13[BANK_UND]),
    COREREG(und_regs[1], banked_r14[BANK_UND]),
    COREREG64(und_regs[2], banked_spsr[BANK_UND]),
    COREREG(irq_regs[0], banked_r13[BANK_IRQ]),
    COREREG(irq_regs[1], banked_r14[BANK_IRQ]),
    COREREG64(irq_regs[2], banked_spsr[BANK_IRQ]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[BANK_FIQ]),
    COREREG(fiq_regs[6], banked_r14[BANK_FIQ]),
    COREREG64(fiq_regs[7], banked_spsr[BANK_FIQ]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};

int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_spsr[bn] = env->spsr;
    env->banked_r14[r14_bank_number(mode)] = env->regs[14];

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
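    /* In the VFP register IDs the low bits select the D register, so
     * incrementing r.id steps through d0..d31 in the loop below.
     */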
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)aa32_vfp_dreg(env, i);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    /* Note that we do not call write_cpustate_to_list()
     * here, so we are only writing the tuple list back to
     * KVM. This is safe because nothing can change the
     * CPUARMState cp15 fields (in particular gdb accesses cannot)
     * and so there are no changes to sync. In fact syncing would
     * be wrong at this point: for a constant register where TCG and
     * KVM disagree about its value, the preceding write_list_to_cpustate()
     * would not have had any effect on the CPUARMState value (since the
     * register is read-only), and a write_cpustate_to_list() here would
     * then try to write the TCG value back into KVM -- this would either
     * fail or incorrectly change the value the guest sees.
     *
     * If we ever want to allow the user to modify cp15 registers via
     * the gdb stub, we would need to be more clever here (for instance
     * tracking the set of registers kvm_arch_get_registers() successfully
     * managed to update the CPUARMState with, and only allowing those
     * to be written back up into the kernel).
     */
    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    cpsr_write(env, cpsr, 0xffffffff, CPSRWriteRaw);

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->spsr = env->banked_spsr[bn];
    env->regs[14] = env->banked_r14[r14_bank_number(mode)];

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)aa32_vfp_dreg(env, i);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return 0;
}

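/*
 * Guest debug (software/hardware breakpoints) and the PMU are not
 * supported for 32-bit KVM hosts, so the hooks below are unimplemented
 * stubs.
 */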
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return false;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return false;
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_pmu_init(CPUState *cs)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}