qemu/target/arm/kvm64.c
   1/*
   2 * ARM implementation of KVM hooks, 64 bit specific code
   3 *
   4 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
   5 * Copyright Alex Bennée 2014, Linaro
   6 *
   7 * This work is licensed under the terms of the GNU GPL, version 2 or later.
   8 * See the COPYING file in the top-level directory.
   9 *
  10 */
  11
  12#include "qemu/osdep.h"
  13#include <sys/ioctl.h>
  14#include <sys/ptrace.h>
  15
  16#include <linux/elf.h>
  17#include <linux/kvm.h>
  18
  19#include "qemu-common.h"
  20#include "qapi/error.h"
  21#include "cpu.h"
  22#include "qemu/timer.h"
  23#include "qemu/error-report.h"
  24#include "qemu/host-utils.h"
  25#include "qemu/main-loop.h"
  26#include "exec/gdbstub.h"
  27#include "sysemu/runstate.h"
  28#include "sysemu/kvm.h"
  29#include "sysemu/kvm_int.h"
  30#include "kvm_arm.h"
  31#include "internals.h"
  32#include "hw/acpi/acpi.h"
  33#include "hw/acpi/ghes.h"
  34#include "hw/arm/virt.h"
  35
  36static bool have_guest_debug;
  37
  38/*
  39 * Although the ARM implementation of hardware assisted debugging
  40 * allows for different breakpoints per-core, the current GDB
  41 * interface treats them as a global pool of registers (which seems to
  42 * be the case for x86, ppc and s390). As a result we store one copy
  43 * of registers which is used for all active cores.
  44 *
  45 * Write access is serialised by virtue of the GDB protocol which
  46 * updates things. Read access (i.e. when the values are copied to the
  47 * vCPU) is also gated by GDB's run control.
  48 *
   49 * This is not unreasonable as, most of the time when debugging kernels,
   50 * you never know which core will eventually execute your function.
  51 */
  52
  53typedef struct {
  54    uint64_t bcr;
  55    uint64_t bvr;
  56} HWBreakpoint;
  57
  58/* The watchpoint registers can cover more area than the requested
  59 * watchpoint so we need to store the additional information
  60 * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
  61 * when the watchpoint is hit.
  62 */
  63typedef struct {
  64    uint64_t wcr;
  65    uint64_t wvr;
  66    CPUWatchpoint details;
  67} HWWatchpoint;
  68
  69/* Maximum and current break/watch point counts */
  70int max_hw_bps, max_hw_wps;
  71GArray *hw_breakpoints, *hw_watchpoints;
  72
  73#define cur_hw_wps      (hw_watchpoints->len)
  74#define cur_hw_bps      (hw_breakpoints->len)
  75#define get_hw_bp(i)    (&g_array_index(hw_breakpoints, HWBreakpoint, i))
  76#define get_hw_wp(i)    (&g_array_index(hw_watchpoints, HWWatchpoint, i))
  77
  78/**
  79 * kvm_arm_init_debug() - check for guest debug capabilities
  80 * @cs: CPUState
  81 *
   82 * kvm_check_extension tells us whether guest debug is supported at all
   83 * and how many hardware break- and watchpoints we have (0 if none).
  84 *
  85 */
  86static void kvm_arm_init_debug(CPUState *cs)
  87{
  88    have_guest_debug = kvm_check_extension(cs->kvm_state,
  89                                           KVM_CAP_SET_GUEST_DEBUG);
  90
  91    max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
  92    hw_watchpoints = g_array_sized_new(true, true,
  93                                       sizeof(HWWatchpoint), max_hw_wps);
  94
  95    max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
  96    hw_breakpoints = g_array_sized_new(true, true,
  97                                       sizeof(HWBreakpoint), max_hw_bps);
  98    return;
  99}
 100
 101/**
 102 * insert_hw_breakpoint()
 103 * @addr: address of breakpoint
 104 *
 105 * See ARM ARM D2.9.1 for details but here we are only going to create
 106 * simple un-linked breakpoints (i.e. we don't chain breakpoints
 107 * together to match address and context or vmid). The hardware is
 108 * capable of fancier matching but that will require exposing that
  109 * fanciness to GDB's interface.
 110 *
 111 * DBGBCR<n>_EL1, Debug Breakpoint Control Registers
 112 *
 113 *  31  24 23  20 19   16 15 14  13  12   9 8   5 4    3 2   1  0
 114 * +------+------+-------+-----+----+------+-----+------+-----+---+
 115 * | RES0 |  BT  |  LBN  | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
 116 * +------+------+-------+-----+----+------+-----+------+-----+---+
 117 *
 118 * BT: Breakpoint type (0 = unlinked address match)
 119 * LBN: Linked BP number (0 = unused)
 120 * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
 121 * BAS: Byte Address Select (RES1 for AArch64)
 122 * E: Enable bit
 123 *
 124 * DBGBVR<n>_EL1, Debug Breakpoint Value Registers
 125 *
 126 *  63  53 52       49 48       2  1 0
 127 * +------+-----------+----------+-----+
 128 * | RESS | VA[52:49] | VA[48:2] | 0 0 |
 129 * +------+-----------+----------+-----+
 130 *
 131 * Depending on the addressing mode bits the top bits of the register
 132 * are a sign extension of the highest applicable VA bit. Some
 133 * versions of GDB don't do it correctly so we ensure they are correct
 134 * here so future PC comparisons will work properly.
 135 */
 136
 137static int insert_hw_breakpoint(target_ulong addr)
 138{
 139    HWBreakpoint brk = {
 140        .bcr = 0x1,                             /* BCR E=1, enable */
 141        .bvr = sextract64(addr, 0, 53)
 142    };
 143
 144    if (cur_hw_bps >= max_hw_bps) {
 145        return -ENOBUFS;
 146    }
 147
 148    brk.bcr = deposit32(brk.bcr, 1, 2, 0x3);   /* PMC = 11 */
 149    brk.bcr = deposit32(brk.bcr, 5, 4, 0xf);   /* BAS = RES1 */
 150
 151    g_array_append_val(hw_breakpoints, brk);
 152
 153    return 0;
 154}
 155
 156/**
 157 * delete_hw_breakpoint()
 158 * @pc: address of breakpoint
 159 *
 160 * Delete a breakpoint and shuffle any above down
 161 */
 162
 163static int delete_hw_breakpoint(target_ulong pc)
 164{
 165    int i;
 166    for (i = 0; i < hw_breakpoints->len; i++) {
 167        HWBreakpoint *brk = get_hw_bp(i);
 168        if (brk->bvr == pc) {
 169            g_array_remove_index(hw_breakpoints, i);
 170            return 0;
 171        }
 172    }
 173    return -ENOENT;
 174}
 175
 176/**
 177 * insert_hw_watchpoint()
 178 * @addr: address of watch point
 179 * @len: size of area
 180 * @type: type of watch point
 181 *
 182 * See ARM ARM D2.10. As with the breakpoints we can do some advanced
 183 * stuff if we want to. The watch points can be linked with the break
 184 * points above to make them context aware. However for simplicity
 185 * currently we only deal with simple read/write watch points.
 186 *
 187 * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
 188 *
 189 *  31  29 28   24 23  21  20  19 16 15 14  13   12  5 4   3 2   1  0
 190 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 191 * | RES0 |  MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
 192 * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
 193 *
  194 * MASK: number of address bits to mask (0=none, 1/2=reserved, 3=3 bits/8 bytes)
 195 * WT: 0 - unlinked, 1 - linked (not currently used)
 196 * LBN: Linked BP number (not currently used)
 197 * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
 198 * BAS: Byte Address Select
 199 * LSC: Load/Store control (01: load, 10: store, 11: both)
 200 * E: Enable
 201 *
  202 * The bottom 2 bits of the value register are masked. Therefore to
  203 * watch any region of 8 bytes or fewer (aligned or not) set MASK=0 and
  204 * a BAS bit for each byte in question. For larger, power-of-2 sized
  205 * regions you need to mask the address as required and set BAS=0xff.
  206 */
 207
 208static int insert_hw_watchpoint(target_ulong addr,
 209                                target_ulong len, int type)
 210{
 211    HWWatchpoint wp = {
 212        .wcr = 1, /* E=1, enable */
 213        .wvr = addr & (~0x7ULL),
 214        .details = { .vaddr = addr, .len = len }
 215    };
 216
 217    if (cur_hw_wps >= max_hw_wps) {
 218        return -ENOBUFS;
 219    }
 220
 221    /*
 222     * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
 223     * valid whether EL3 is implemented or not
 224     */
 225    wp.wcr = deposit32(wp.wcr, 1, 2, 3);
 226
 227    switch (type) {
 228    case GDB_WATCHPOINT_READ:
 229        wp.wcr = deposit32(wp.wcr, 3, 2, 1);
 230        wp.details.flags = BP_MEM_READ;
 231        break;
 232    case GDB_WATCHPOINT_WRITE:
 233        wp.wcr = deposit32(wp.wcr, 3, 2, 2);
 234        wp.details.flags = BP_MEM_WRITE;
 235        break;
 236    case GDB_WATCHPOINT_ACCESS:
 237        wp.wcr = deposit32(wp.wcr, 3, 2, 3);
 238        wp.details.flags = BP_MEM_ACCESS;
 239        break;
 240    default:
 241        g_assert_not_reached();
 242        break;
 243    }
 244    if (len <= 8) {
 245        /* we align the address and set the bits in BAS */
 246        int off = addr & 0x7;
 247        int bas = (1 << len) - 1;
 248
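             /*
              * BAS is a byte-select bitmap: bit k of the field watches the
              * byte at DBGWVR + k. wvr was aligned down to 8 bytes above, so
              * shift the run of len bits up to the original byte offset, e.g.
              * a 2-byte watchpoint at 0x1003 gives off=3, BAS=0b00011000.
              */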
 249        wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
 250    } else {
 251        /* For ranges above 8 bytes we need to be a power of 2 */
 252        if (is_power_of_2(len)) {
 253            int bits = ctz64(len);
 254
 255            wp.wvr &= ~((1 << bits) - 1);
 256            wp.wcr = deposit32(wp.wcr, 24, 4, bits);
 257            wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
 258        } else {
 259            return -ENOBUFS;
 260        }
 261    }
 262
 263    g_array_append_val(hw_watchpoints, wp);
 264    return 0;
 265}
 266
 267
 268static bool check_watchpoint_in_range(int i, target_ulong addr)
 269{
 270    HWWatchpoint *wp = get_hw_wp(i);
 271    uint64_t addr_top, addr_bottom = wp->wvr;
 272    int bas = extract32(wp->wcr, 5, 8);
 273    int mask = extract32(wp->wcr, 24, 4);
 274
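         /*
          * Recover the watched range from the control register: a non-zero
          * MASK field means a power-of-2 sized region starting at wvr,
          * otherwise the contiguous BAS bits select which bytes relative to
          * DBGWVR are covered.
          */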
 275    if (mask) {
 276        addr_top = addr_bottom + (1 << mask);
 277    } else {
 278        /* BAS must be contiguous but can offset against the base
 279         * address in DBGWVR */
 280        addr_bottom = addr_bottom + ctz32(bas);
 281        addr_top = addr_bottom + clo32(bas);
 282    }
 283
 284    if (addr >= addr_bottom && addr <= addr_top) {
 285        return true;
 286    }
 287
 288    return false;
 289}
 290
 291/**
 292 * delete_hw_watchpoint()
  293 * @addr: address of watchpoint
  294 *
  295 * Delete a watchpoint and shuffle any above down
 296 */
 297
 298static int delete_hw_watchpoint(target_ulong addr,
 299                                target_ulong len, int type)
 300{
 301    int i;
 302    for (i = 0; i < cur_hw_wps; i++) {
 303        if (check_watchpoint_in_range(i, addr)) {
 304            g_array_remove_index(hw_watchpoints, i);
 305            return 0;
 306        }
 307    }
 308    return -ENOENT;
 309}
 310
 311
 312int kvm_arch_insert_hw_breakpoint(target_ulong addr,
 313                                  target_ulong len, int type)
 314{
 315    switch (type) {
 316    case GDB_BREAKPOINT_HW:
 317        return insert_hw_breakpoint(addr);
 318        break;
 319    case GDB_WATCHPOINT_READ:
 320    case GDB_WATCHPOINT_WRITE:
 321    case GDB_WATCHPOINT_ACCESS:
 322        return insert_hw_watchpoint(addr, len, type);
 323    default:
 324        return -ENOSYS;
 325    }
 326}
 327
 328int kvm_arch_remove_hw_breakpoint(target_ulong addr,
 329                                  target_ulong len, int type)
 330{
 331    switch (type) {
 332    case GDB_BREAKPOINT_HW:
 333        return delete_hw_breakpoint(addr);
 334    case GDB_WATCHPOINT_READ:
 335    case GDB_WATCHPOINT_WRITE:
 336    case GDB_WATCHPOINT_ACCESS:
 337        return delete_hw_watchpoint(addr, len, type);
 338    default:
 339        return -ENOSYS;
 340    }
 341}
 342
 343
 344void kvm_arch_remove_all_hw_breakpoints(void)
 345{
 346    if (cur_hw_wps > 0) {
 347        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
 348    }
 349    if (cur_hw_bps > 0) {
 350        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
 351    }
 352}
 353
 354void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
 355{
 356    int i;
 357    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));
 358
 359    for (i = 0; i < max_hw_wps; i++) {
 360        HWWatchpoint *wp = get_hw_wp(i);
 361        ptr->dbg_wcr[i] = wp->wcr;
 362        ptr->dbg_wvr[i] = wp->wvr;
 363    }
 364    for (i = 0; i < max_hw_bps; i++) {
 365        HWBreakpoint *bp = get_hw_bp(i);
 366        ptr->dbg_bcr[i] = bp->bcr;
 367        ptr->dbg_bvr[i] = bp->bvr;
 368    }
 369}
 370
 371bool kvm_arm_hw_debug_active(CPUState *cs)
 372{
 373    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
 374}
 375
 376static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
 377{
 378    int i;
 379
 380    for (i = 0; i < cur_hw_bps; i++) {
 381        HWBreakpoint *bp = get_hw_bp(i);
 382        if (bp->bvr == pc) {
 383            return true;
 384        }
 385    }
 386    return false;
 387}
 388
 389static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
 390{
 391    int i;
 392
 393    for (i = 0; i < cur_hw_wps; i++) {
 394        if (check_watchpoint_in_range(i, addr)) {
 395            return &get_hw_wp(i)->details;
 396        }
 397    }
 398    return NULL;
 399}
 400
 401static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr,
 402                                    const char *name)
 403{
 404    int err;
 405
 406    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
 407    if (err != 0) {
 408        error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
 409        return false;
 410    }
 411
 412    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
 413    if (err != 0) {
 414        error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
 415        return false;
 416    }
 417
 418    return true;
 419}
 420
 421void kvm_arm_pmu_init(CPUState *cs)
 422{
 423    struct kvm_device_attr attr = {
 424        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
 425        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
 426    };
 427
 428    if (!ARM_CPU(cs)->has_pmu) {
 429        return;
 430    }
 431    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
 432        error_report("failed to init PMU");
 433        abort();
 434    }
 435}
 436
 437void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
 438{
 439    struct kvm_device_attr attr = {
 440        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
 441        .addr = (intptr_t)&irq,
 442        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
 443    };
 444
 445    if (!ARM_CPU(cs)->has_pmu) {
 446        return;
 447    }
 448    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
 449        error_report("failed to set irq for PMU");
 450        abort();
 451    }
 452}
 453
 454void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
 455{
 456    struct kvm_device_attr attr = {
 457        .group = KVM_ARM_VCPU_PVTIME_CTRL,
 458        .attr = KVM_ARM_VCPU_PVTIME_IPA,
 459        .addr = (uint64_t)&ipa,
 460    };
 461
 462    if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) {
 463        return;
 464    }
 465    if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) {
 466        error_report("failed to init PVTIME IPA");
 467        abort();
 468    }
 469}
 470
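     /*
      * The AArch32-view ID registers are exposed by KVM as 64-bit sysregs
      * (hence the KVM_REG_SIZE_U64 assert below), so read the full 64 bits
      * and hand back only the low 32 to the caller.
      */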
 471static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
 472{
 473    uint64_t ret;
 474    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
 475    int err;
 476
 477    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
 478    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
 479    if (err < 0) {
 480        return -1;
 481    }
 482    *pret = ret;
 483    return 0;
 484}
 485
 486static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
 487{
 488    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };
 489
 490    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
 491    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
 492}
 493
 494bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
 495{
 496    /* Identify the feature bits corresponding to the host CPU, and
  497     * fill out the ARMHostCPUFeatures fields accordingly. To do this
 498     * we have to create a scratch VM, create a single CPU inside it,
 499     * and then query that CPU for the relevant ID registers.
 500     */
 501    int fdarray[3];
 502    bool sve_supported;
 503    uint64_t features = 0;
 504    uint64_t t;
 505    int err;
 506
 507    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
 508     * we know these will only support creating one kind of guest CPU,
 509     * which is its preferred CPU type. Fortunately these old kernels
 510     * support only a very limited number of CPUs.
 511     */
 512    static const uint32_t cpus_to_try[] = {
 513        KVM_ARM_TARGET_AEM_V8,
 514        KVM_ARM_TARGET_FOUNDATION_V8,
 515        KVM_ARM_TARGET_CORTEX_A57,
 516        QEMU_KVM_ARM_TARGET_NONE
 517    };
 518    /*
 519     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
 520     * to use the preferred target
 521     */
 522    struct kvm_vcpu_init init = { .target = -1, };
 523
 524    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
 525        return false;
 526    }
 527
 528    ahcf->target = init.target;
 529    ahcf->dtb_compatible = "arm,arm-v8";
 530
 531    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
 532                         ARM64_SYS_REG(3, 0, 0, 4, 0));
 533    if (unlikely(err < 0)) {
 534        /*
 535         * Before v4.15, the kernel only exposed a limited number of system
 536         * registers, not including any of the interesting AArch64 ID regs.
 537         * For the most part we could leave these fields as zero with minimal
 538         * effect, since this does not affect the values seen by the guest.
 539         *
 540         * However, it could cause problems down the line for QEMU,
 541         * so provide a minimal v8.0 default.
 542         *
 543         * ??? Could read MIDR and use knowledge from cpu64.c.
 544         * ??? Could map a page of memory into our temp guest and
 545         *     run the tiniest of hand-crafted kernels to extract
 546         *     the values seen by the guest.
 547         * ??? Either of these sounds like too much effort just
  548         *     to work around running a very old kernel.
 549         */
 550        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
 551        err = 0;
 552    } else {
 553        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
 554                              ARM64_SYS_REG(3, 0, 0, 4, 1));
 555        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
 556                              ARM64_SYS_REG(3, 0, 0, 5, 0));
 557        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
 558                              ARM64_SYS_REG(3, 0, 0, 5, 1));
 559        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
 560                              ARM64_SYS_REG(3, 0, 0, 6, 0));
 561        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
 562                              ARM64_SYS_REG(3, 0, 0, 6, 1));
 563        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
 564                              ARM64_SYS_REG(3, 0, 0, 7, 0));
 565        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
 566                              ARM64_SYS_REG(3, 0, 0, 7, 1));
 567        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
 568                              ARM64_SYS_REG(3, 0, 0, 7, 2));
 569
 570        /*
 571         * Note that if AArch32 support is not present in the host,
 572         * the AArch32 sysregs are present to be read, but will
 573         * return UNKNOWN values.  This is neither better nor worse
 574         * than skipping the reads and leaving 0, as we must avoid
 575         * considering the values in every case.
 576         */
 577        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
 578                              ARM64_SYS_REG(3, 0, 0, 1, 0));
 579        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
 580                              ARM64_SYS_REG(3, 0, 0, 1, 1));
 581        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
 582                              ARM64_SYS_REG(3, 0, 0, 3, 4));
 583        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
 584                              ARM64_SYS_REG(3, 0, 0, 1, 2));
 585        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
 586                              ARM64_SYS_REG(3, 0, 0, 1, 4));
 587        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
 588                              ARM64_SYS_REG(3, 0, 0, 1, 5));
 589        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
 590                              ARM64_SYS_REG(3, 0, 0, 1, 6));
 591        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
 592                              ARM64_SYS_REG(3, 0, 0, 1, 7));
 593        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
 594                              ARM64_SYS_REG(3, 0, 0, 2, 0));
 595        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
 596                              ARM64_SYS_REG(3, 0, 0, 2, 1));
 597        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
 598                              ARM64_SYS_REG(3, 0, 0, 2, 2));
 599        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
 600                              ARM64_SYS_REG(3, 0, 0, 2, 3));
 601        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
 602                              ARM64_SYS_REG(3, 0, 0, 2, 4));
 603        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
 604                              ARM64_SYS_REG(3, 0, 0, 2, 5));
 605        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
 606                              ARM64_SYS_REG(3, 0, 0, 2, 6));
 607        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
 608                              ARM64_SYS_REG(3, 0, 0, 2, 7));
 609
 610        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
 611                              ARM64_SYS_REG(3, 0, 0, 3, 0));
 612        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
 613                              ARM64_SYS_REG(3, 0, 0, 3, 1));
 614        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
 615                              ARM64_SYS_REG(3, 0, 0, 3, 2));
 616
 617        /*
 618         * DBGDIDR is a bit complicated because the kernel doesn't
 619         * provide an accessor for it in 64-bit mode, which is what this
 620         * scratch VM is in, and there's no architected "64-bit sysreg
 621         * which reads the same as the 32-bit register" the way there is
 622         * for other ID registers. Instead we synthesize a value from the
 623         * AArch64 ID_AA64DFR0, the same way the kernel code in
 624         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
 625         * We only do this if the CPU supports AArch32 at EL1.
 626         */
 627        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
 628            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
 629            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
 630            int ctx_cmps =
 631                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
 632            int version = 6; /* ARMv8 debug architecture */
 633            bool has_el3 =
 634                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
 635            uint32_t dbgdidr = 0;
 636
 637            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
 638            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
 639            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
 640            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
 641            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
 642            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
 643            dbgdidr |= (1 << 15); /* RES1 bit */
 644            ahcf->isar.dbgdidr = dbgdidr;
 645        }
 646    }
 647
 648    sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
 649
 650    kvm_arm_destroy_scratch_host_vcpu(fdarray);
 651
 652    if (err < 0) {
 653        return false;
 654    }
 655
 656    /* Add feature bits that can't appear until after VCPU init. */
 657    if (sve_supported) {
 658        t = ahcf->isar.id_aa64pfr0;
 659        t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1);
 660        ahcf->isar.id_aa64pfr0 = t;
 661    }
 662
 663    /*
 664     * We can assume any KVM supporting CPU is at least a v8
 665     * with VFPv4+Neon; this in turn implies most of the other
 666     * feature bits.
 667     */
 668    features |= 1ULL << ARM_FEATURE_V8;
 669    features |= 1ULL << ARM_FEATURE_NEON;
 670    features |= 1ULL << ARM_FEATURE_AARCH64;
 671    features |= 1ULL << ARM_FEATURE_PMU;
 672    features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;
 673
 674    ahcf->features = features;
 675
 676    return true;
 677}
 678
 679void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
 680{
 681    bool has_steal_time = kvm_arm_steal_time_supported();
 682
 683    if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
 684        if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
 685            cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
 686        } else {
 687            cpu->kvm_steal_time = ON_OFF_AUTO_ON;
 688        }
 689    } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
 690        if (!has_steal_time) {
 691            error_setg(errp, "'kvm-steal-time' cannot be enabled "
 692                             "on this host");
 693            return;
 694        } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
 695            /*
 696             * DEN0057A chapter 2 says "This specification only covers
 697             * systems in which the Execution state of the hypervisor
 698             * as well as EL1 of virtual machines is AArch64.". And,
 699             * to ensure that, the smc/hvc calls are only specified as
 700             * smc64/hvc64.
 701             */
 702            error_setg(errp, "'kvm-steal-time' cannot be enabled "
 703                             "for AArch32 guests");
 704            return;
 705        }
 706    }
 707}
 708
 709bool kvm_arm_aarch32_supported(void)
 710{
 711    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
 712}
 713
 714bool kvm_arm_sve_supported(void)
 715{
 716    return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
 717}
 718
 719bool kvm_arm_steal_time_supported(void)
 720{
 721    return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
 722}
 723
 724QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
 725
 726void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
 727{
 728    /* Only call this function if kvm_arm_sve_supported() returns true. */
 729    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
 730    static bool probed;
 731    uint32_t vq = 0;
 732    int i, j;
 733
 734    bitmap_clear(map, 0, ARM_MAX_VQ);
 735
 736    /*
 737     * KVM ensures all host CPUs support the same set of vector lengths.
 738     * So we only need to create the scratch VCPUs once and then cache
 739     * the results.
 740     */
 741    if (!probed) {
 742        struct kvm_vcpu_init init = {
 743            .target = -1,
 744            .features[0] = (1 << KVM_ARM_VCPU_SVE),
 745        };
 746        struct kvm_one_reg reg = {
 747            .id = KVM_REG_ARM64_SVE_VLS,
 748            .addr = (uint64_t)&vls[0],
 749        };
 750        int fdarray[3], ret;
 751
 752        probed = true;
 753
 754        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
 755            error_report("failed to create scratch VCPU with SVE enabled");
 756            abort();
 757        }
 758        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
 759        kvm_arm_destroy_scratch_host_vcpu(fdarray);
 760        if (ret) {
 761            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
 762                         strerror(errno));
 763            abort();
 764        }
 765
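             /*
              * Find the highest vector length (in quadwords) that the host
              * advertises, so we can warn if it exceeds what QEMU can enable.
              */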
 766        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
 767            if (vls[i]) {
 768                vq = 64 - clz64(vls[i]) + i * 64;
 769                break;
 770            }
 771        }
 772        if (vq > ARM_MAX_VQ) {
 773            warn_report("KVM supports vector lengths larger than "
 774                        "QEMU can enable");
 775        }
 776    }
 777
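         /*
          * Translate KVM's VLS bitmap into QEMU's vq map: bit (j - 1) of
          * word i corresponds to a vector length of (i * 64 + j) quadwords,
          * e.g. bit 0 of vls[0] set means 128-bit vectors are supported.
          */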
 778    for (i = 0; i < KVM_ARM64_SVE_VLS_WORDS; ++i) {
 779        if (!vls[i]) {
 780            continue;
 781        }
 782        for (j = 1; j <= 64; ++j) {
 783            vq = j + i * 64;
 784            if (vq > ARM_MAX_VQ) {
 785                return;
 786            }
 787            if (vls[i] & (1UL << (j - 1))) {
 788                set_bit(vq - 1, map);
 789            }
 790        }
 791    }
 792}
 793
 794static int kvm_arm_sve_set_vls(CPUState *cs)
 795{
 796    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = {0};
 797    struct kvm_one_reg reg = {
 798        .id = KVM_REG_ARM64_SVE_VLS,
 799        .addr = (uint64_t)&vls[0],
 800    };
 801    ARMCPU *cpu = ARM_CPU(cs);
 802    uint32_t vq;
 803    int i, j;
 804
 805    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
 806
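         /*
          * Encode the enabled vector lengths into KVM's VLS bitmap, the
          * inverse of the mapping used in kvm_arm_sve_get_vls(): each vq in
          * cpu->sve_vq_map sets bit (vq - 1), split across 64-bit words.
          */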
 807    for (vq = 1; vq <= cpu->sve_max_vq; ++vq) {
 808        if (test_bit(vq - 1, cpu->sve_vq_map)) {
 809            i = (vq - 1) / 64;
 810            j = (vq - 1) % 64;
 811            vls[i] |= 1UL << j;
 812        }
 813    }
 814
 815    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
 816}
 817
 818#define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5
 819
 820int kvm_arch_init_vcpu(CPUState *cs)
 821{
 822    int ret;
 823    uint64_t mpidr;
 824    ARMCPU *cpu = ARM_CPU(cs);
 825    CPUARMState *env = &cpu->env;
 826
 827    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
 828        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
 829        error_report("KVM is not supported for this guest CPU type");
 830        return -EINVAL;
 831    }
 832
 833    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);
 834
 835    /* Determine init features for this CPU */
 836    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
 837    if (cs->start_powered_off) {
 838        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
 839    }
 840    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
 841        cpu->psci_version = 2;
 842        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
 843    }
 844    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
 845        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
 846    }
 847    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
 848        cpu->has_pmu = false;
 849    }
 850    if (cpu->has_pmu) {
 851        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
 852    } else {
 853        env->features &= ~(1ULL << ARM_FEATURE_PMU);
 854    }
 855    if (cpu_isar_feature(aa64_sve, cpu)) {
 856        assert(kvm_arm_sve_supported());
 857        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
 858    }
 859
 860    /* Do KVM_ARM_VCPU_INIT ioctl */
 861    ret = kvm_arm_vcpu_init(cs);
 862    if (ret) {
 863        return ret;
 864    }
 865
 866    if (cpu_isar_feature(aa64_sve, cpu)) {
 867        ret = kvm_arm_sve_set_vls(cs);
 868        if (ret) {
 869            return ret;
 870        }
 871        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
 872        if (ret) {
 873            return ret;
 874        }
 875    }
 876
 877    /*
 878     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
 879     * Currently KVM has its own idea about MPIDR assignment, so we
 880     * override our defaults with what we get from KVM.
 881     */
 882    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
 883    if (ret) {
 884        return ret;
 885    }
 886    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;
 887
 888    kvm_arm_init_debug(cs);
 889
 890    /* Check whether user space can specify guest syndrome value */
 891    kvm_arm_init_serror_injection(cs);
 892
 893    return kvm_arm_init_cpreg_list(cpu);
 894}
 895
 896int kvm_arch_destroy_vcpu(CPUState *cs)
 897{
 898    return 0;
 899}
 900
 901bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
 902{
 903    /* Return true if the regidx is a register we should synchronize
 904     * via the cpreg_tuples array (ie is not a core or sve reg that
 905     * we sync by hand in kvm_arch_get/put_registers())
 906     */
 907    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
 908    case KVM_REG_ARM_CORE:
 909    case KVM_REG_ARM64_SVE:
 910        return false;
 911    default:
 912        return true;
 913    }
 914}
 915
 916typedef struct CPRegStateLevel {
 917    uint64_t regidx;
 918    int level;
 919} CPRegStateLevel;
 920
 921/* All system registers not listed in the following table are assumed to be
 922 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 923 * often, you must add it to this table with a state of either
 924 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 925 */
 926static const CPRegStateLevel non_runtime_cpregs[] = {
 927    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
 928};
 929
 930int kvm_arm_cpreg_level(uint64_t regidx)
 931{
 932    int i;
 933
 934    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
 935        const CPRegStateLevel *l = &non_runtime_cpregs[i];
 936        if (l->regidx == regidx) {
 937            return l->level;
 938        }
 939    }
 940
 941    return KVM_PUT_RUNTIME_STATE;
 942}
 943
 944/* Callers must hold the iothread mutex lock */
 945static void kvm_inject_arm_sea(CPUState *c)
 946{
 947    ARMCPU *cpu = ARM_CPU(c);
 948    CPUARMState *env = &cpu->env;
 949    uint32_t esr;
 950    bool same_el;
 951
 952    c->exception_index = EXCP_DATA_ABORT;
 953    env->exception.target_el = 1;
 954
 955    /*
 956     * Set the DFSC to synchronous external abort and set FnV to not valid,
  957     * this will tell the guest that FAR_ELx is UNKNOWN for this abort.
 958     */
 959    same_el = arm_current_el(env) == env->exception.target_el;
 960    esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);
 961
 962    env->exception.syndrome = esr;
 963
 964    arm_cpu_do_interrupt(c);
 965}
 966
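     /*
      * Helpers to build KVM_GET/SET_ONE_REG ids for the "core" registers:
      * KVM_REG_ARM_CORE_REG() encodes the field's offset within struct
      * kvm_regs, and these macros add the architecture and size bits.
      */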
 967#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
 968                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
 969
 970#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
 971                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
 972
 973#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
 974                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
 975
 976static int kvm_arch_put_fpsimd(CPUState *cs)
 977{
 978    CPUARMState *env = &ARM_CPU(cs)->env;
 979    struct kvm_one_reg reg;
 980    int i, ret;
 981
 982    for (i = 0; i < 32; i++) {
 983        uint64_t *q = aa64_vfp_qreg(env, i);
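             /*
              * On big-endian hosts the two 64-bit halves of QEMU's Q register
              * representation are swapped into a temporary before being
              * handed to KVM.
              */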
 984#ifdef HOST_WORDS_BIGENDIAN
 985        uint64_t fp_val[2] = { q[1], q[0] };
 986        reg.addr = (uintptr_t)fp_val;
 987#else
 988        reg.addr = (uintptr_t)q;
 989#endif
 990        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
 991        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
 992        if (ret) {
 993            return ret;
 994        }
 995    }
 996
 997    return 0;
 998}
 999
1000/*
1001 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
1002 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
1003 * code the slice index to zero for now as it's unlikely we'll need more than
1004 * one slice for quite some time.
1005 */
1006static int kvm_arch_put_sve(CPUState *cs)
1007{
1008    ARMCPU *cpu = ARM_CPU(cs);
1009    CPUARMState *env = &cpu->env;
1010    uint64_t tmp[ARM_MAX_VQ * 2];
1011    uint64_t *r;
1012    struct kvm_one_reg reg;
1013    int n, ret;
1014
1015    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
1016        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
1017        reg.addr = (uintptr_t)r;
1018        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
1019        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1020        if (ret) {
1021            return ret;
1022        }
1023    }
1024
1025    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
 1026        r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
1027                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
1028        reg.addr = (uintptr_t)r;
1029        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
1030        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1031        if (ret) {
1032            return ret;
1033        }
1034    }
1035
1036    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
1037                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
1038    reg.addr = (uintptr_t)r;
1039    reg.id = KVM_REG_ARM64_SVE_FFR(0);
1040    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1041    if (ret) {
1042        return ret;
1043    }
1044
1045    return 0;
1046}
1047
1048int kvm_arch_put_registers(CPUState *cs, int level)
1049{
1050    struct kvm_one_reg reg;
1051    uint64_t val;
1052    uint32_t fpr;
1053    int i, ret;
1054    unsigned int el;
1055
1056    ARMCPU *cpu = ARM_CPU(cs);
1057    CPUARMState *env = &cpu->env;
1058
1059    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
1060     * AArch64 registers before pushing them out to 64-bit KVM.
1061     */
1062    if (!is_a64(env)) {
1063        aarch64_sync_32_to_64(env);
1064    }
1065
1066    for (i = 0; i < 31; i++) {
1067        reg.id = AARCH64_CORE_REG(regs.regs[i]);
1068        reg.addr = (uintptr_t) &env->xregs[i];
1069        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1070        if (ret) {
1071            return ret;
1072        }
1073    }
1074
1075    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
1076     * QEMU side we keep the current SP in xregs[31] as well.
1077     */
1078    aarch64_save_sp(env, 1);
1079
1080    reg.id = AARCH64_CORE_REG(regs.sp);
1081    reg.addr = (uintptr_t) &env->sp_el[0];
1082    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1083    if (ret) {
1084        return ret;
1085    }
1086
1087    reg.id = AARCH64_CORE_REG(sp_el1);
1088    reg.addr = (uintptr_t) &env->sp_el[1];
1089    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1090    if (ret) {
1091        return ret;
1092    }
1093
1094    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
1095    if (is_a64(env)) {
1096        val = pstate_read(env);
1097    } else {
1098        val = cpsr_read(env);
1099    }
1100    reg.id = AARCH64_CORE_REG(regs.pstate);
1101    reg.addr = (uintptr_t) &val;
1102    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1103    if (ret) {
1104        return ret;
1105    }
1106
1107    reg.id = AARCH64_CORE_REG(regs.pc);
1108    reg.addr = (uintptr_t) &env->pc;
1109    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1110    if (ret) {
1111        return ret;
1112    }
1113
1114    reg.id = AARCH64_CORE_REG(elr_el1);
1115    reg.addr = (uintptr_t) &env->elr_el[1];
1116    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1117    if (ret) {
1118        return ret;
1119    }
1120
1121    /* Saved Program State Registers
1122     *
1123     * Before we restore from the banked_spsr[] array we need to
1124     * ensure that any modifications to env->spsr are correctly
1125     * reflected in the banks.
1126     */
1127    el = arm_current_el(env);
1128    if (el > 0 && !is_a64(env)) {
1129        i = bank_number(env->uncached_cpsr & CPSR_M);
1130        env->banked_spsr[i] = env->spsr;
1131    }
1132
1133    /* KVM 0-4 map to QEMU banks 1-5 */
1134    for (i = 0; i < KVM_NR_SPSR; i++) {
1135        reg.id = AARCH64_CORE_REG(spsr[i]);
1136        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
1137        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1138        if (ret) {
1139            return ret;
1140        }
1141    }
1142
1143    if (cpu_isar_feature(aa64_sve, cpu)) {
1144        ret = kvm_arch_put_sve(cs);
1145    } else {
1146        ret = kvm_arch_put_fpsimd(cs);
1147    }
1148    if (ret) {
1149        return ret;
1150    }
1151
1152    reg.addr = (uintptr_t)(&fpr);
1153    fpr = vfp_get_fpsr(env);
1154    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
1155    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1156    if (ret) {
1157        return ret;
1158    }
1159
1160    reg.addr = (uintptr_t)(&fpr);
1161    fpr = vfp_get_fpcr(env);
1162    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
1163    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1164    if (ret) {
1165        return ret;
1166    }
1167
1168    write_cpustate_to_list(cpu, true);
1169
1170    if (!write_list_to_kvmstate(cpu, level)) {
1171        return -EINVAL;
1172    }
1173
1174   /*
1175    * Setting VCPU events should be triggered after syncing the registers
1176    * to avoid overwriting potential changes made by KVM upon calling
1177    * KVM_SET_VCPU_EVENTS ioctl
1178    */
1179    ret = kvm_put_vcpu_events(cpu);
1180    if (ret) {
1181        return ret;
1182    }
1183
1184    kvm_arm_sync_mpstate_to_kvm(cpu);
1185
1186    return ret;
1187}
1188
1189static int kvm_arch_get_fpsimd(CPUState *cs)
1190{
1191    CPUARMState *env = &ARM_CPU(cs)->env;
1192    struct kvm_one_reg reg;
1193    int i, ret;
1194
1195    for (i = 0; i < 32; i++) {
1196        uint64_t *q = aa64_vfp_qreg(env, i);
1197        reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
1198        reg.addr = (uintptr_t)q;
1199        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1200        if (ret) {
1201            return ret;
1202        } else {
1203#ifdef HOST_WORDS_BIGENDIAN
1204            uint64_t t;
1205            t = q[0], q[0] = q[1], q[1] = t;
1206#endif
1207        }
1208    }
1209
1210    return 0;
1211}
1212
1213/*
1214 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
1215 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
1216 * code the slice index to zero for now as it's unlikely we'll need more than
1217 * one slice for quite some time.
1218 */
1219static int kvm_arch_get_sve(CPUState *cs)
1220{
1221    ARMCPU *cpu = ARM_CPU(cs);
1222    CPUARMState *env = &cpu->env;
1223    struct kvm_one_reg reg;
1224    uint64_t *r;
1225    int n, ret;
1226
1227    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
1228        r = &env->vfp.zregs[n].d[0];
1229        reg.addr = (uintptr_t)r;
1230        reg.id = KVM_REG_ARM64_SVE_ZREG(n, 0);
1231        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1232        if (ret) {
1233            return ret;
1234        }
1235        sve_bswap64(r, r, cpu->sve_max_vq * 2);
1236    }
1237
1238    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
1239        r = &env->vfp.pregs[n].p[0];
1240        reg.addr = (uintptr_t)r;
1241        reg.id = KVM_REG_ARM64_SVE_PREG(n, 0);
1242        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1243        if (ret) {
1244            return ret;
1245        }
1246        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
1247    }
1248
1249    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
1250    reg.addr = (uintptr_t)r;
1251    reg.id = KVM_REG_ARM64_SVE_FFR(0);
1252    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1253    if (ret) {
1254        return ret;
1255    }
1256    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
1257
1258    return 0;
1259}
1260
1261int kvm_arch_get_registers(CPUState *cs)
1262{
1263    struct kvm_one_reg reg;
1264    uint64_t val;
1265    unsigned int el;
1266    uint32_t fpr;
1267    int i, ret;
1268
1269    ARMCPU *cpu = ARM_CPU(cs);
1270    CPUARMState *env = &cpu->env;
1271
1272    for (i = 0; i < 31; i++) {
1273        reg.id = AARCH64_CORE_REG(regs.regs[i]);
1274        reg.addr = (uintptr_t) &env->xregs[i];
1275        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1276        if (ret) {
1277            return ret;
1278        }
1279    }
1280
1281    reg.id = AARCH64_CORE_REG(regs.sp);
1282    reg.addr = (uintptr_t) &env->sp_el[0];
1283    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1284    if (ret) {
1285        return ret;
1286    }
1287
1288    reg.id = AARCH64_CORE_REG(sp_el1);
1289    reg.addr = (uintptr_t) &env->sp_el[1];
1290    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1291    if (ret) {
1292        return ret;
1293    }
1294
1295    reg.id = AARCH64_CORE_REG(regs.pstate);
1296    reg.addr = (uintptr_t) &val;
1297    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1298    if (ret) {
1299        return ret;
1300    }
1301
1302    env->aarch64 = ((val & PSTATE_nRW) == 0);
1303    if (is_a64(env)) {
1304        pstate_write(env, val);
1305    } else {
1306        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
1307    }
1308
1309    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
1310     * QEMU side we keep the current SP in xregs[31] as well.
1311     */
1312    aarch64_restore_sp(env, 1);
1313
1314    reg.id = AARCH64_CORE_REG(regs.pc);
1315    reg.addr = (uintptr_t) &env->pc;
1316    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1317    if (ret) {
1318        return ret;
1319    }
1320
1321    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
1322     * incoming AArch64 regs received from 64-bit KVM.
1323     * We must perform this after all of the registers have been acquired from
1324     * the kernel.
1325     */
1326    if (!is_a64(env)) {
1327        aarch64_sync_64_to_32(env);
1328    }
1329
1330    reg.id = AARCH64_CORE_REG(elr_el1);
1331    reg.addr = (uintptr_t) &env->elr_el[1];
1332    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1333    if (ret) {
1334        return ret;
1335    }
1336
1337    /* Fetch the SPSR registers
1338     *
1339     * KVM SPSRs 0-4 map to QEMU banks 1-5
1340     */
1341    for (i = 0; i < KVM_NR_SPSR; i++) {
1342        reg.id = AARCH64_CORE_REG(spsr[i]);
1343        reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
1344        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1345        if (ret) {
1346            return ret;
1347        }
1348    }
1349
1350    el = arm_current_el(env);
1351    if (el > 0 && !is_a64(env)) {
1352        i = bank_number(env->uncached_cpsr & CPSR_M);
1353        env->spsr = env->banked_spsr[i];
1354    }
1355
1356    if (cpu_isar_feature(aa64_sve, cpu)) {
1357        ret = kvm_arch_get_sve(cs);
1358    } else {
1359        ret = kvm_arch_get_fpsimd(cs);
1360    }
1361    if (ret) {
1362        return ret;
1363    }
1364
1365    reg.addr = (uintptr_t)(&fpr);
1366    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
1367    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1368    if (ret) {
1369        return ret;
1370    }
1371    vfp_set_fpsr(env, fpr);
1372
1373    reg.addr = (uintptr_t)(&fpr);
1374    reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
1375    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
1376    if (ret) {
1377        return ret;
1378    }
1379    vfp_set_fpcr(env, fpr);
1380
1381    ret = kvm_get_vcpu_events(cpu);
1382    if (ret) {
1383        return ret;
1384    }
1385
1386    if (!write_kvmstate_to_list(cpu)) {
1387        return -EINVAL;
1388    }
1389    /* Note that it's OK to have registers which aren't in CPUState,
1390     * so we can ignore a failure return here.
1391     */
1392    write_list_to_cpustate(cpu);
1393
1394    kvm_arm_sync_mpstate_to_qemu(cpu);
1395
1396    /* TODO: other registers */
1397    return ret;
1398}
1399
1400void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
1401{
1402    ram_addr_t ram_addr;
1403    hwaddr paddr;
1404    Object *obj = qdev_get_machine();
1405    VirtMachineState *vms = VIRT_MACHINE(obj);
1406    bool acpi_enabled = virt_is_acpi_enabled(vms);
1407
1408    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
1409
1410    if (acpi_enabled && addr &&
1411            object_property_get_bool(obj, "ras", NULL)) {
1412        ram_addr = qemu_ram_addr_from_host(addr);
1413        if (ram_addr != RAM_ADDR_INVALID &&
1414            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
1415            kvm_hwpoison_page_add(ram_addr);
1416            /*
1417             * If this is a BUS_MCEERR_AR, we know we have been called
1418             * synchronously from the vCPU thread, so we can easily
1419             * synchronize the state and inject an error.
1420             *
1421             * TODO: we currently don't tell the guest at all about
1422             * BUS_MCEERR_AO. In that case we might either be being
1423             * called synchronously from the vCPU thread, or a bit
1424             * later from the main thread, so doing the injection of
1425             * the error would be more complicated.
1426             */
1427            if (code == BUS_MCEERR_AR) {
1428                kvm_cpu_synchronize_state(c);
1429                if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
1430                    kvm_inject_arm_sea(c);
1431                } else {
1432                    error_report("failed to record the error");
1433                    abort();
1434                }
1435            }
1436            return;
1437        }
1438        if (code == BUS_MCEERR_AO) {
1439            error_report("Hardware memory error at addr %p for memory used by "
1440                "QEMU itself instead of guest system!", addr);
1441        }
1442    }
1443
1444    if (code == BUS_MCEERR_AR) {
1445        error_report("Hardware memory error!");
1446        exit(1);
1447    }
1448}
1449
1450/* C6.6.29 BRK instruction */
1451static const uint32_t brk_insn = 0xd4200000;
1452
1453int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1454{
1455    if (have_guest_debug) {
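             /* Save the original instruction and patch in a BRK #0 in its place */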
1456        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
1457            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
1458            return -EINVAL;
1459        }
1460        return 0;
1461    } else {
1462        error_report("guest debug not supported on this kernel");
1463        return -EINVAL;
1464    }
1465}
1466
1467int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1468{
1469    static uint32_t brk;
1470
1471    if (have_guest_debug) {
1472        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
1473            brk != brk_insn ||
1474            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
1475            return -EINVAL;
1476        }
1477        return 0;
1478    } else {
1479        error_report("guest debug not supported on this kernel");
1480        return -EINVAL;
1481    }
1482}
1483
1484/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
1485 *
1486 * To minimise translating between kernel and user-space the kernel
1487 * ABI just provides user-space with the full exception syndrome
1488 * register value to be decoded in QEMU.
1489 */
1490
1491bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
1492{
1493    int hsr_ec = syn_get_ec(debug_exit->hsr);
1494    ARMCPU *cpu = ARM_CPU(cs);
1495    CPUARMState *env = &cpu->env;
1496
1497    /* Ensure PC is synchronised */
1498    kvm_cpu_synchronize_state(cs);
1499
1500    switch (hsr_ec) {
1501    case EC_SOFTWARESTEP:
1502        if (cs->singlestep_enabled) {
1503            return true;
1504        } else {
1505            /*
1506             * The kernel should have suppressed the guest's ability to
1507             * single step at this point so something has gone wrong.
1508             */
1509            error_report("%s: guest single-step while debugging unsupported"
1510                         " (%"PRIx64", %"PRIx32")",
1511                         __func__, env->pc, debug_exit->hsr);
1512            return false;
1513        }
1514        break;
1515    case EC_AA64_BKPT:
1516        if (kvm_find_sw_breakpoint(cs, env->pc)) {
1517            return true;
1518        }
1519        break;
1520    case EC_BREAKPOINT:
1521        if (find_hw_breakpoint(cs, env->pc)) {
1522            return true;
1523        }
1524        break;
1525    case EC_WATCHPOINT:
1526    {
1527        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
1528        if (wp) {
1529            cs->watchpoint_hit = wp;
1530            return true;
1531        }
1532        break;
1533    }
1534    default:
1535        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
1536                     __func__, debug_exit->hsr, env->pc);
1537    }
1538
1539    /* If we are not handling the debug exception it must belong to
1540     * the guest. Let's re-use the existing TCG interrupt code to set
1541     * everything up properly.
1542     */
1543    cs->exception_index = EXCP_BKPT;
1544    env->exception.syndrome = debug_exit->hsr;
1545    env->exception.vaddress = debug_exit->far;
1546    env->exception.target_el = 1;
1547    qemu_mutex_lock_iothread();
1548    arm_cpu_do_interrupt(cs);
1549    qemu_mutex_unlock_iothread();
1550
1551    return false;
1552}
1553
1554#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
1555#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)
1556
1557/*
1558 * ESR_EL1
1559 * ISS encoding
1560 * AARCH64: DFSC,   bits [5:0]
1561 * AARCH32:
1562 *      TTBCR.EAE == 0
1563 *          FS[4]   - DFSR[10]
1564 *          FS[3:0] - DFSR[3:0]
1565 *      TTBCR.EAE == 1
1566 *          FS, bits [5:0]
1567 */
1568#define ESR_DFSC(aarch64, lpae, v)        \
1569    ((aarch64 || (lpae)) ? ((v) & 0x3F)   \
1570               : (((v) >> 6) | ((v) & 0x1F)))
1571
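     /*
      * DFSC/FS value for a synchronous external abort: 0x10 in the AArch64
      * and LPAE encodings, 0x8 in the AArch32 short-descriptor encoding.
      */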
1572#define ESR_DFSC_EXTABT(aarch64, lpae) \
1573    ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)
1574
1575bool kvm_arm_verify_ext_dabt_pending(CPUState *cs)
1576{
1577    uint64_t dfsr_val;
1578
1579    if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
1580        ARMCPU *cpu = ARM_CPU(cs);
1581        CPUARMState *env = &cpu->env;
1582        int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
1583        int lpae = 0;
1584
1585        if (!aarch64_mode) {
1586            uint64_t ttbcr;
1587
1588            if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
1589                lpae = arm_feature(env, ARM_FEATURE_LPAE)
1590                        && (ttbcr & TTBCR_EAE);
1591            }
1592        }
1593        /*
1594         * The verification here is based on the DFSC bits
1595         * of the ESR_EL1 reg only
1596         */
1597         return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
1598                ESR_DFSC_EXTABT(aarch64_mode, lpae));
1599    }
1600    return false;
1601}
1602