qemu/target/arm/helper.c
<<
>>
Prefs
   1/*
   2 * ARM generic helpers.
   3 *
   4 * This code is licensed under the GNU GPL v2 or later.
   5 *
   6 * SPDX-License-Identifier: GPL-2.0-or-later
   7 */
   8#include "qemu/osdep.h"
   9#include "qemu/units.h"
  10#include "target/arm/idau.h"
  11#include "trace.h"
  12#include "cpu.h"
  13#include "internals.h"
  14#include "exec/gdbstub.h"
  15#include "exec/helper-proto.h"
  16#include "qemu/host-utils.h"
  17#include "sysemu/sysemu.h"
  18#include "qemu/bitops.h"
  19#include "qemu/crc32c.h"
  20#include "qemu/qemu-print.h"
  21#include "exec/exec-all.h"
  22#include <zlib.h> /* For crc32 */
  23#include "hw/semihosting/semihost.h"
  24#include "sysemu/cpus.h"
  25#include "sysemu/kvm.h"
  26#include "qemu/range.h"
  27#include "qapi/qapi-commands-machine-target.h"
  28#include "qapi/error.h"
  29#include "qemu/guest-random.h"
  30#ifdef CONFIG_TCG
  31#include "arm_ldst.h"
  32#include "exec/cpu_ldst.h"
  33#endif
  34
  35#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
  36
  37#ifndef CONFIG_USER_ONLY
  38
  39static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
  40                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
  41                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
  42                               target_ulong *page_size_ptr,
  43                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
  44#endif
  45
  46static void switch_mode(CPUARMState *env, int mode);
  47
  48static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
  49{
  50    int nregs;
  51
  52    /* VFP data registers are always little-endian.  */
  53    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
  54    if (reg < nregs) {
  55        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
  56        return 8;
  57    }
  58    if (arm_feature(env, ARM_FEATURE_NEON)) {
  59        /* Aliases for Q regs.  */
  60        nregs += 16;
  61        if (reg < nregs) {
  62            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
  63            stq_le_p(buf, q[0]);
  64            stq_le_p(buf + 8, q[1]);
  65            return 16;
  66        }
  67    }
  68    switch (reg - nregs) {
  69    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
  70    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
  71    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
  72    }
  73    return 0;
  74}
  75
  76static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
  77{
  78    int nregs;
  79
  80    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
  81    if (reg < nregs) {
  82        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
  83        return 8;
  84    }
  85    if (arm_feature(env, ARM_FEATURE_NEON)) {
  86        nregs += 16;
  87        if (reg < nregs) {
  88            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
  89            q[0] = ldq_le_p(buf);
  90            q[1] = ldq_le_p(buf + 8);
  91            return 16;
  92        }
  93    }
  94    switch (reg - nregs) {
  95    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
  96    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
  97    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
  98    }
  99    return 0;
 100}
 101
 102static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
 103{
 104    switch (reg) {
 105    case 0 ... 31:
 106        /* 128 bit FP register */
 107        {
 108            uint64_t *q = aa64_vfp_qreg(env, reg);
 109            stq_le_p(buf, q[0]);
 110            stq_le_p(buf + 8, q[1]);
 111            return 16;
 112        }
 113    case 32:
 114        /* FPSR */
 115        stl_p(buf, vfp_get_fpsr(env));
 116        return 4;
 117    case 33:
 118        /* FPCR */
 119        stl_p(buf, vfp_get_fpcr(env));
 120        return 4;
 121    default:
 122        return 0;
 123    }
 124}
 125
 126static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
 127{
 128    switch (reg) {
 129    case 0 ... 31:
 130        /* 128 bit FP register */
 131        {
 132            uint64_t *q = aa64_vfp_qreg(env, reg);
 133            q[0] = ldq_le_p(buf);
 134            q[1] = ldq_le_p(buf + 8);
 135            return 16;
 136        }
 137    case 32:
 138        /* FPSR */
 139        vfp_set_fpsr(env, ldl_p(buf));
 140        return 4;
 141    case 33:
 142        /* FPCR */
 143        vfp_set_fpcr(env, ldl_p(buf));
 144        return 4;
 145    default:
 146        return 0;
 147    }
 148}
 149
 150static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
 151{
 152    assert(ri->fieldoffset);
 153    if (cpreg_field_is_64bit(ri)) {
 154        return CPREG_FIELD64(env, ri);
 155    } else {
 156        return CPREG_FIELD32(env, ri);
 157    }
 158}
 159
 160static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
 161                      uint64_t value)
 162{
 163    assert(ri->fieldoffset);
 164    if (cpreg_field_is_64bit(ri)) {
 165        CPREG_FIELD64(env, ri) = value;
 166    } else {
 167        CPREG_FIELD32(env, ri) = value;
 168    }
 169}
 170
 171static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
 172{
 173    return (char *)env + ri->fieldoffset;
 174}
 175
 176uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
 177{
 178    /* Raw read of a coprocessor register (as needed for migration, etc). */
 179    if (ri->type & ARM_CP_CONST) {
 180        return ri->resetvalue;
 181    } else if (ri->raw_readfn) {
 182        return ri->raw_readfn(env, ri);
 183    } else if (ri->readfn) {
 184        return ri->readfn(env, ri);
 185    } else {
 186        return raw_read(env, ri);
 187    }
 188}
 189
 190static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
 191                             uint64_t v)
 192{
 193    /* Raw write of a coprocessor register (as needed for migration, etc).
 194     * Note that constant registers are treated as write-ignored; the
 195     * caller should check for success by whether a readback gives the
 196     * value written.
 197     */
 198    if (ri->type & ARM_CP_CONST) {
 199        return;
 200    } else if (ri->raw_writefn) {
 201        ri->raw_writefn(env, ri, v);
 202    } else if (ri->writefn) {
 203        ri->writefn(env, ri, v);
 204    } else {
 205        raw_write(env, ri, v);
 206    }
 207}
 208
 209static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
 210{
 211    ARMCPU *cpu = env_archcpu(env);
 212    const ARMCPRegInfo *ri;
 213    uint32_t key;
 214
 215    key = cpu->dyn_xml.cpregs_keys[reg];
 216    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
 217    if (ri) {
 218        if (cpreg_field_is_64bit(ri)) {
 219            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
 220        } else {
 221            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
 222        }
 223    }
 224    return 0;
 225}
 226
static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    /*
     * Writing system registers from the gdbstub is not supported;
     * returning 0 reports "register not written" to gdb.
     */
    return 0;
}
 231
 232static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
 233{
 234   /* Return true if the regdef would cause an assertion if you called
 235    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
 236    * program bug for it not to have the NO_RAW flag).
 237    * NB that returning false here doesn't necessarily mean that calling
 238    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
 239    * read/write access functions which are safe for raw use" from "has
 240    * read/write access functions which have side effects but has forgotten
 241    * to provide raw access functions".
 242    * The tests here line up with the conditions in read/write_raw_cp_reg()
 243    * and assertions in raw_read()/raw_write().
 244    */
 245    if ((ri->type & ARM_CP_CONST) ||
 246        ri->fieldoffset ||
 247        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
 248        return false;
 249    }
 250    return true;
 251}
 252
/*
 * Write the coprocessor state from cpu->env to the (index,value) list.
 * Returns false if any register index in the list is unknown; known
 * registers are still processed. When kvm_sync is set, each entry is
 * only updated if the previous list->cpustate sync for it succeeded
 * (checked by a write/read-back probe of the old value).
 */
bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t newval;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            /* Unknown index: report failure but keep processing the rest. */
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            /* Registers without raw access are not migrated via this list. */
            continue;
        }

        newval = read_raw_cp_reg(&cpu->env, ri);
        if (kvm_sync) {
            /*
             * Only sync if the previous list->cpustate sync succeeded.
             * Rather than tracking the success/failure state for every
             * item in the list, we just recheck "does the raw write we must
             * have made in write_list_to_cpustate() read back OK" here.
             */
            uint64_t oldval = cpu->cpreg_values[i];

            if (oldval == newval) {
                continue;
            }

            write_raw_cp_reg(&cpu->env, ri, oldval);
            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
                continue;
            }

            /* Probe succeeded: restore the current value before recording. */
            write_raw_cp_reg(&cpu->env, ri, newval);
        }
        cpu->cpreg_values[i] = newval;
    }
    return ok;
}
 298
 299bool write_list_to_cpustate(ARMCPU *cpu)
 300{
 301    int i;
 302    bool ok = true;
 303
 304    for (i = 0; i < cpu->cpreg_array_len; i++) {
 305        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
 306        uint64_t v = cpu->cpreg_values[i];
 307        const ARMCPRegInfo *ri;
 308
 309        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 310        if (!ri) {
 311            ok = false;
 312            continue;
 313        }
 314        if (ri->type & ARM_CP_NO_RAW) {
 315            continue;
 316        }
 317        /* Write value and confirm it reads back as written
 318         * (to catch read-only registers and partially read-only
 319         * registers where the incoming migration value doesn't match)
 320         */
 321        write_raw_cp_reg(&cpu->env, ri, v);
 322        if (read_raw_cp_reg(&cpu->env, ri) != v) {
 323            ok = false;
 324        }
 325    }
 326    return ok;
 327}
 328
 329static void add_cpreg_to_list(gpointer key, gpointer opaque)
 330{
 331    ARMCPU *cpu = opaque;
 332    uint64_t regidx;
 333    const ARMCPRegInfo *ri;
 334
 335    regidx = *(uint32_t *)key;
 336    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 337
 338    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
 339        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
 340        /* The value array need not be initialized at this point */
 341        cpu->cpreg_array_len++;
 342    }
 343}
 344
 345static void count_cpreg(gpointer key, gpointer opaque)
 346{
 347    ARMCPU *cpu = opaque;
 348    uint64_t regidx;
 349    const ARMCPRegInfo *ri;
 350
 351    regidx = *(uint32_t *)key;
 352    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 353
 354    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
 355        cpu->cpreg_array_len++;
 356    }
 357}
 358
 359static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
 360{
 361    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
 362    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
 363
 364    if (aidx > bidx) {
 365        return 1;
 366    }
 367    if (aidx < bidx) {
 368        return -1;
 369    }
 370    return 0;
 371}
 372
/*
 * Build the sorted list of migratable cp register indexes for this CPU.
 * Two passes over the sorted key list: count_cpreg() sizes the arrays,
 * add_cpreg_to_list() fills in the indexes.
 */
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    /* First pass: count the migratable registers. */
    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    /* Second pass: fill in the index array (re-counts as it goes). */
    g_list_foreach(keys, add_cpreg_to_list, cpu);

    /* Both passes must agree on the number of migratable registers. */
    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
 402
 403/*
 404 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 405 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 406 *
 407 * access_el3_aa32ns: Used to check AArch32 register views.
 408 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 409 */
 410static CPAccessResult access_el3_aa32ns(CPUARMState *env,
 411                                        const ARMCPRegInfo *ri,
 412                                        bool isread)
 413{
 414    bool secure = arm_is_secure_below_el3(env);
 415
 416    assert(!arm_el_is_aa64(env, 3));
 417    if (secure) {
 418        return CP_ACCESS_TRAP_UNCATEGORIZED;
 419    }
 420    return CP_ACCESS_OK;
 421}
 422
 423static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
 424                                                const ARMCPRegInfo *ri,
 425                                                bool isread)
 426{
 427    if (!arm_el_is_aa64(env, 3)) {
 428        return access_el3_aa32ns(env, ri, isread);
 429    }
 430    return CP_ACCESS_OK;
 431}
 432
 433/* Some secure-only AArch32 registers trap to EL3 if used from
 434 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 435 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 436 * We assume that the .access field is set to PL1_RW.
 437 */
 438static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
 439                                            const ARMCPRegInfo *ri,
 440                                            bool isread)
 441{
 442    if (arm_current_el(env) == 3) {
 443        return CP_ACCESS_OK;
 444    }
 445    if (arm_is_secure_below_el3(env)) {
 446        return CP_ACCESS_TRAP_EL3;
 447    }
 448    /* This will be EL1 NS and EL2 NS, which just UNDEF */
 449    return CP_ACCESS_TRAP_UNCATEGORIZED;
 450}
 451
 452/* Check for traps to "powerdown debug" registers, which are controlled
 453 * by MDCR.TDOSA
 454 */
 455static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
 456                                   bool isread)
 457{
 458    int el = arm_current_el(env);
 459    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
 460        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 461        (arm_hcr_el2_eff(env) & HCR_TGE);
 462
 463    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
 464        return CP_ACCESS_TRAP_EL2;
 465    }
 466    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
 467        return CP_ACCESS_TRAP_EL3;
 468    }
 469    return CP_ACCESS_OK;
 470}
 471
 472/* Check for traps to "debug ROM" registers, which are controlled
 473 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 474 */
 475static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
 476                                  bool isread)
 477{
 478    int el = arm_current_el(env);
 479    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
 480        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 481        (arm_hcr_el2_eff(env) & HCR_TGE);
 482
 483    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
 484        return CP_ACCESS_TRAP_EL2;
 485    }
 486    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
 487        return CP_ACCESS_TRAP_EL3;
 488    }
 489    return CP_ACCESS_OK;
 490}
 491
 492/* Check for traps to general debug registers, which are controlled
 493 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 494 */
 495static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
 496                                  bool isread)
 497{
 498    int el = arm_current_el(env);
 499    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
 500        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 501        (arm_hcr_el2_eff(env) & HCR_TGE);
 502
 503    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
 504        return CP_ACCESS_TRAP_EL2;
 505    }
 506    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
 507        return CP_ACCESS_TRAP_EL3;
 508    }
 509    return CP_ACCESS_OK;
 510}
 511
 512/* Check for traps to performance monitor registers, which are controlled
 513 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 514 */
 515static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
 516                                 bool isread)
 517{
 518    int el = arm_current_el(env);
 519
 520    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
 521        && !arm_is_secure_below_el3(env)) {
 522        return CP_ACCESS_TRAP_EL2;
 523    }
 524    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
 525        return CP_ACCESS_TRAP_EL3;
 526    }
 527    return CP_ACCESS_OK;
 528}
 529
 530static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 531{
 532    ARMCPU *cpu = env_archcpu(env);
 533
 534    raw_write(env, ri, value);
 535    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
 536}
 537
 538static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 539{
 540    ARMCPU *cpu = env_archcpu(env);
 541
 542    if (raw_read(env, ri) != value) {
 543        /* Unlike real hardware the qemu TLB uses virtual addresses,
 544         * not modified virtual addresses, so this causes a TLB flush.
 545         */
 546        tlb_flush(CPU(cpu));
 547        raw_write(env, ri, value);
 548    }
 549}
 550
 551static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 552                             uint64_t value)
 553{
 554    ARMCPU *cpu = env_archcpu(env);
 555
 556    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
 557        && !extended_addresses_enabled(env)) {
 558        /* For VMSA (when not using the LPAE long descriptor page table
 559         * format) this register includes the ASID, so do a TLB flush.
 560         * For PMSA it is purely a process ID and no action is needed.
 561         */
 562        tlb_flush(CPU(cpu));
 563    }
 564    raw_write(env, ri, value);
 565}
 566
 567/* IS variants of TLB operations must affect all cores */
 568static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 569                             uint64_t value)
 570{
 571    CPUState *cs = env_cpu(env);
 572
 573    tlb_flush_all_cpus_synced(cs);
 574}
 575
 576static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 577                             uint64_t value)
 578{
 579    CPUState *cs = env_cpu(env);
 580
 581    tlb_flush_all_cpus_synced(cs);
 582}
 583
 584static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 585                             uint64_t value)
 586{
 587    CPUState *cs = env_cpu(env);
 588
 589    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 590}
 591
 592static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 593                             uint64_t value)
 594{
 595    CPUState *cs = env_cpu(env);
 596
 597    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 598}
 599
 600/*
 601 * Non-IS variants of TLB operations are upgraded to
 602 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 603 * force broadcast of these operations.
 604 */
 605static bool tlb_force_broadcast(CPUARMState *env)
 606{
 607    return (env->cp15.hcr_el2 & HCR_FB) &&
 608        arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
 609}
 610
 611static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
 612                          uint64_t value)
 613{
 614    /* Invalidate all (TLBIALL) */
 615    ARMCPU *cpu = env_archcpu(env);
 616
 617    if (tlb_force_broadcast(env)) {
 618        tlbiall_is_write(env, NULL, value);
 619        return;
 620    }
 621
 622    tlb_flush(CPU(cpu));
 623}
 624
 625static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
 626                          uint64_t value)
 627{
 628    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
 629    ARMCPU *cpu = env_archcpu(env);
 630
 631    if (tlb_force_broadcast(env)) {
 632        tlbimva_is_write(env, NULL, value);
 633        return;
 634    }
 635
 636    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
 637}
 638
 639static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
 640                           uint64_t value)
 641{
 642    /* Invalidate by ASID (TLBIASID) */
 643    ARMCPU *cpu = env_archcpu(env);
 644
 645    if (tlb_force_broadcast(env)) {
 646        tlbiasid_is_write(env, NULL, value);
 647        return;
 648    }
 649
 650    tlb_flush(CPU(cpu));
 651}
 652
 653static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
 654                           uint64_t value)
 655{
 656    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
 657    ARMCPU *cpu = env_archcpu(env);
 658
 659    if (tlb_force_broadcast(env)) {
 660        tlbimvaa_is_write(env, NULL, value);
 661        return;
 662    }
 663
 664    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
 665}
 666
 667static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
 668                               uint64_t value)
 669{
 670    CPUState *cs = env_cpu(env);
 671
 672    tlb_flush_by_mmuidx(cs,
 673                        ARMMMUIdxBit_S12NSE1 |
 674                        ARMMMUIdxBit_S12NSE0 |
 675                        ARMMMUIdxBit_S2NS);
 676}
 677
 678static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 679                                  uint64_t value)
 680{
 681    CPUState *cs = env_cpu(env);
 682
 683    tlb_flush_by_mmuidx_all_cpus_synced(cs,
 684                                        ARMMMUIdxBit_S12NSE1 |
 685                                        ARMMMUIdxBit_S12NSE0 |
 686                                        ARMMMUIdxBit_S2NS);
 687}
 688
 689static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
 690                            uint64_t value)
 691{
 692    /* Invalidate by IPA. This has to invalidate any structures that
 693     * contain only stage 2 translation information, but does not need
 694     * to apply to structures that contain combined stage 1 and stage 2
 695     * translation information.
 696     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
 697     */
 698    CPUState *cs = env_cpu(env);
 699    uint64_t pageaddr;
 700
 701    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
 702        return;
 703    }
 704
 705    pageaddr = sextract64(value << 12, 0, 40);
 706
 707    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
 708}
 709
 710static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 711                               uint64_t value)
 712{
 713    CPUState *cs = env_cpu(env);
 714    uint64_t pageaddr;
 715
 716    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
 717        return;
 718    }
 719
 720    pageaddr = sextract64(value << 12, 0, 40);
 721
 722    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
 723                                             ARMMMUIdxBit_S2NS);
 724}
 725
 726static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
 727                              uint64_t value)
 728{
 729    CPUState *cs = env_cpu(env);
 730
 731    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
 732}
 733
 734static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 735                                 uint64_t value)
 736{
 737    CPUState *cs = env_cpu(env);
 738
 739    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
 740}
 741
 742static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
 743                              uint64_t value)
 744{
 745    CPUState *cs = env_cpu(env);
 746    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
 747
 748    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
 749}
 750
 751static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 752                                 uint64_t value)
 753{
 754    CPUState *cs = env_cpu(env);
 755    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
 756
 757    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
 758                                             ARMMMUIdxBit_S1E2);
 759}
 760
/* cp15 registers common to all ARM CPU variants. */
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
 795
/* cp15 registers present only on pre-v8 CPUs. */
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
 824
/* cp15 registers present only on pre-v6 CPUs. */
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
 833
/* cp15/cp14 registers present only on pre-v7 CPUs. */
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    /* Memory region attribute registers: NOP since TCG does not model them. */
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
 882
 883static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 884                        uint64_t value)
 885{
 886    uint32_t mask = 0;
 887
 888    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
 889    if (!arm_feature(env, ARM_FEATURE_V8)) {
 890        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
 891         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
 892         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
 893         */
 894        if (arm_feature(env, ARM_FEATURE_VFP)) {
 895            /* VFP coprocessor: cp10 & cp11 [23:20] */
 896            mask |= (1 << 31) | (1 << 30) | (0xf << 20);
 897
 898            if (!arm_feature(env, ARM_FEATURE_NEON)) {
 899                /* ASEDIS [31] bit is RAO/WI */
 900                value |= (1 << 31);
 901            }
 902
 903            /* VFPv3 and upwards with NEON implement 32 double precision
 904             * registers (D0-D31).
 905             */
 906            if (!arm_feature(env, ARM_FEATURE_NEON) ||
 907                    !arm_feature(env, ARM_FEATURE_VFP3)) {
 908                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
 909                value |= (1 << 30);
 910            }
 911        }
 912        value &= mask;
 913    }
 914
 915    /*
 916     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
 917     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
 918     */
 919    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
 920        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
 921        value &= ~(0xf << 20);
 922        value |= env->cp15.cpacr_el1 & (0xf << 20);
 923    }
 924
 925    env->cp15.cpacr_el1 = value;
 926}
 927
 928static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 929{
 930    /*
 931     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
 932     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
 933     */
 934    uint64_t value = env->cp15.cpacr_el1;
 935
 936    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
 937        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
 938        value &= ~(0xf << 20);
 939    }
 940    return value;
 941}
 942
 943
/* Reset handler for CPACR: reset the register to 0 via cpacr_write(). */
static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Call cpacr_write() so that we reset with the correct RAO bits set
     * for our CPU features (e.g. ASEDIS/D32DIS forced to 1 without NEON).
     */
    cpacr_write(env, ri, 0);
}
 951
 952static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
 953                                   bool isread)
 954{
 955    if (arm_feature(env, ARM_FEATURE_V8)) {
 956        /* Check if CPACR accesses are to be trapped to EL2 */
 957        if (arm_current_el(env) == 1 &&
 958            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
 959            return CP_ACCESS_TRAP_EL2;
 960        /* Check if CPACR accesses are to be trapped to EL3 */
 961        } else if (arm_current_el(env) < 3 &&
 962                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
 963            return CP_ACCESS_TRAP_EL3;
 964        }
 965    }
 966
 967    return CP_ACCESS_OK;
 968}
 969
 970static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
 971                                  bool isread)
 972{
 973    /* Check if CPTR accesses are set to trap to EL3 */
 974    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
 975        return CP_ACCESS_TRAP_EL3;
 976    }
 977
 978    return CP_ACCESS_OK;
 979}
 980
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    /* DSB and DMB are implemented as NOPs */
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    /* IFAR is banked between Secure and Non-secure state */
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    /* CPACR: custom handlers implement the RAO/WI and NSACR behaviour */
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
    REGINFO_SENTINEL
};
1012
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800 /* PMCR.N: number of implemented counters */
#define PMCRN_SHIFT 11
#define PMCRLC  0x40 /* long (64-bit) cycle counter overflow */
#define PMCRDP  0x10 /* disable PMCCNTR when counting is prohibited */
#define PMCRD   0x8  /* clock divider: count PMCCNTR every 64th cycle */
#define PMCRC   0x4  /* cycle counter reset (write-only action bit) */
#define PMCRP   0x2  /* event counter reset (write-only action bit) */
#define PMCRE   0x1  /* global counter enable */

/* Per-counter filter bits (PMEVTYPER / PMCCFILTR) */
#define PMXEVTYPER_P          0x80000000 /* don't count at EL1 */
#define PMXEVTYPER_U          0x40000000 /* don't count at EL0 */
#define PMXEVTYPER_NSK        0x20000000 /* Non-secure EL1 filter */
#define PMXEVTYPER_NSU        0x10000000 /* Non-secure EL0 filter */
#define PMXEVTYPER_NSH        0x08000000 /* count at EL2 */
#define PMXEVTYPER_M          0x04000000 /* Secure EL3 filter */
#define PMXEVTYPER_MT         0x02000000 /* multithreaded counting */
#define PMXEVTYPER_EVTCOUNT   0x0000ffff /* event number to count */
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

/* Writable bits of PMCCFILTR: all from AArch64, all but M from AArch32 */
#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
1039
1040static inline uint32_t pmu_num_counters(CPUARMState *env)
1041{
1042  return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
1043}
1044
1045/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
1046static inline uint64_t pmu_counter_mask(CPUARMState *env)
1047{
1048  return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
1049}
1050
/*
 * Descriptor for one countable PMU event; the pm_events[] table below
 * lists every event this implementation knows about.
 */
typedef struct pm_event {
    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
    bool (*supported)(CPUARMState *);
    /*
     * Retrieve the current count of the underlying event. The programmed
     * counters hold a difference from the return value from this function
     */
    uint64_t (*get_count)(CPUARMState *);
    /*
     * Return how many nanoseconds it will take (at a minimum) for count events
     * to occur. A negative value indicates the counter will never overflow, or
     * that the counter has otherwise arranged for the overflow bit to be set
     * and the PMU interrupt to be raised on overflow.
     */
    int64_t (*ns_per_count)(uint64_t);
} pm_event;
1068
1069static bool event_always_supported(CPUARMState *env)
1070{
1071    return true;
1072}
1073
static uint64_t swinc_get_count(CPUARMState *env)
{
    /*
     * SW_INCR events are written directly to the pmevcntr's by writes to
     * PMSWINC, so there is no underlying count maintained by the PMU itself;
     * the baseline "underlying count" is therefore always zero.
     */
    return 0;
}
1082
static int64_t swinc_ns_per(uint64_t ignored)
{
    /*
     * SW_INCR counters only advance on explicit PMSWINC writes, never from
     * the passage of time, so report "never overflows" as a negative value.
     */
    (void)ignored;
    return -1;
}
1087
1088/*
1089 * Return the underlying cycle count for the PMU cycle counters. If we're in
1090 * usermode, simply return 0.
1091 */
1092static uint64_t cycles_get_count(CPUARMState *env)
1093{
1094#ifndef CONFIG_USER_ONLY
1095    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1096                   ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1097#else
1098    return cpu_get_host_ticks();
1099#endif
1100}
1101
1102#ifndef CONFIG_USER_ONLY
static int64_t cycles_ns_per(uint64_t cycles)
{
    /*
     * Nanoseconds until 'cycles' cycles have elapsed.
     * NOTE(review): dimensionally this should be
     * cycles * NANOSECONDS_PER_SECOND / ARM_CPU_FREQ; the expression below
     * only yields the right answer because ARM_CPU_FREQ currently equals
     * NANOSECONDS_PER_SECOND (1 GHz).  Revisit if the CPU frequency ever
     * becomes configurable (see the FIXME on ARM_CPU_FREQ).
     */
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}
1107
1108static bool instructions_supported(CPUARMState *env)
1109{
1110    return use_icount == 1 /* Precise instruction counting */;
1111}
1112
1113static uint64_t instructions_get_count(CPUARMState *env)
1114{
1115    return (uint64_t)cpu_get_icount_raw();
1116}
1117
/* Convert an instruction count into nanoseconds via the icount clock. */
static int64_t instructions_ns_per(uint64_t icount)
{
    int64_t insns = (int64_t)icount;

    return cpu_icount_to_ns(insns);
}
1122#endif
1123
/*
 * Table of PMU events this implementation can count.  Order is not
 * significant: pmu_init() scans it to build supported_event_map[] and
 * the PMCEID[01] registers.
 */
static const pm_event pm_events[] = {
    { .number = 0x000, /* SW_INCR */
      .supported = event_always_supported,
      .get_count = swinc_get_count,
      .ns_per_count = swinc_ns_per,
    },
#ifndef CONFIG_USER_ONLY
    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
      .supported = instructions_supported,
      .get_count = instructions_get_count,
      .ns_per_count = instructions_ns_per,
    },
    { .number = 0x011, /* CPU_CYCLES, Cycle */
      .supported = event_always_supported,
      .get_count = cycles_get_count,
      .ns_per_count = cycles_ns_per,
    }
#endif
};
1143
1144/*
1145 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1146 * events (i.e. the statistical profiling extension), this implementation
1147 * should first be updated to something sparse instead of the current
1148 * supported_event_map[] array.
1149 */
1150#define MAX_EVENT_ID 0x11
1151#define UNSUPPORTED_EVENT UINT16_MAX
1152static uint16_t supported_event_map[MAX_EVENT_ID + 1];
1153
1154/*
1155 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
1156 * of ARM event numbers to indices in our pm_events array.
1157 *
1158 * Note: Events in the 0x40XX range are not currently supported.
1159 */
1160void pmu_init(ARMCPU *cpu)
1161{
1162    unsigned int i;
1163
1164    /*
1165     * Empty supported_event_map and cpu->pmceid[01] before adding supported
1166     * events to them
1167     */
1168    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
1169        supported_event_map[i] = UNSUPPORTED_EVENT;
1170    }
1171    cpu->pmceid0 = 0;
1172    cpu->pmceid1 = 0;
1173
1174    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
1175        const pm_event *cnt = &pm_events[i];
1176        assert(cnt->number <= MAX_EVENT_ID);
1177        /* We do not currently support events in the 0x40xx range */
1178        assert(cnt->number <= 0x3f);
1179
1180        if (cnt->supported(&cpu->env)) {
1181            supported_event_map[cnt->number] = i;
1182            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
1183            if (cnt->number & 0x20) {
1184                cpu->pmceid1 |= event_mask;
1185            } else {
1186                cpu->pmceid0 |= event_mask;
1187            }
1188        }
1189    }
1190}
1191
1192/*
1193 * Check at runtime whether a PMU event is supported for the current machine
1194 */
1195static bool event_supported(uint16_t number)
1196{
1197    if (number > MAX_EVENT_ID) {
1198        return false;
1199    }
1200    return supported_event_map[number] != UNSUPPORTED_EVENT;
1201}
1202
1203static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
1204                                   bool isread)
1205{
1206    /* Performance monitor registers user accessibility is controlled
1207     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1208     * trapping to EL2 or EL3 for other accesses.
1209     */
1210    int el = arm_current_el(env);
1211
1212    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1213        return CP_ACCESS_TRAP;
1214    }
1215    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1216        && !arm_is_secure_below_el3(env)) {
1217        return CP_ACCESS_TRAP_EL2;
1218    }
1219    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1220        return CP_ACCESS_TRAP_EL3;
1221    }
1222
1223    return CP_ACCESS_OK;
1224}
1225
1226static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1227                                           const ARMCPRegInfo *ri,
1228                                           bool isread)
1229{
1230    /* ER: event counter read trap control */
1231    if (arm_feature(env, ARM_FEATURE_V8)
1232        && arm_current_el(env) == 0
1233        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1234        && isread) {
1235        return CP_ACCESS_OK;
1236    }
1237
1238    return pmreg_access(env, ri, isread);
1239}
1240
1241static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1242                                         const ARMCPRegInfo *ri,
1243                                         bool isread)
1244{
1245    /* SW: software increment write trap control */
1246    if (arm_feature(env, ARM_FEATURE_V8)
1247        && arm_current_el(env) == 0
1248        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1249        && !isread) {
1250        return CP_ACCESS_OK;
1251    }
1252
1253    return pmreg_access(env, ri, isread);
1254}
1255
1256static CPAccessResult pmreg_access_selr(CPUARMState *env,
1257                                        const ARMCPRegInfo *ri,
1258                                        bool isread)
1259{
1260    /* ER: event counter read trap control */
1261    if (arm_feature(env, ARM_FEATURE_V8)
1262        && arm_current_el(env) == 0
1263        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1264        return CP_ACCESS_OK;
1265    }
1266
1267    return pmreg_access(env, ri, isread);
1268}
1269
1270static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1271                                         const ARMCPRegInfo *ri,
1272                                         bool isread)
1273{
1274    /* CR: cycle counter read trap control */
1275    if (arm_feature(env, ARM_FEATURE_V8)
1276        && arm_current_el(env) == 0
1277        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1278        && isread) {
1279        return CP_ACCESS_OK;
1280    }
1281
1282    return pmreg_access(env, ri, isread);
1283}
1284
1285/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1286 * the current EL, security state, and register configuration.
1287 */
1288static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1289{
1290    uint64_t filter;
1291    bool e, p, u, nsk, nsu, nsh, m;
1292    bool enabled, prohibited, filtered;
1293    bool secure = arm_is_secure(env);
1294    int el = arm_current_el(env);
1295    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
1296
1297    if (!arm_feature(env, ARM_FEATURE_PMU)) {
1298        return false;
1299    }
1300
1301    if (!arm_feature(env, ARM_FEATURE_EL2) ||
1302            (counter < hpmn || counter == 31)) {
1303        e = env->cp15.c9_pmcr & PMCRE;
1304    } else {
1305        e = env->cp15.mdcr_el2 & MDCR_HPME;
1306    }
1307    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1308
1309    if (!secure) {
1310        if (el == 2 && (counter < hpmn || counter == 31)) {
1311            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
1312        } else {
1313            prohibited = false;
1314        }
1315    } else {
1316        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
1317           (env->cp15.mdcr_el3 & MDCR_SPME);
1318    }
1319
1320    if (prohibited && counter == 31) {
1321        prohibited = env->cp15.c9_pmcr & PMCRDP;
1322    }
1323
1324    if (counter == 31) {
1325        filter = env->cp15.pmccfiltr_el0;
1326    } else {
1327        filter = env->cp15.c14_pmevtyper[counter];
1328    }
1329
1330    p   = filter & PMXEVTYPER_P;
1331    u   = filter & PMXEVTYPER_U;
1332    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
1333    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
1334    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
1335    m   = arm_el_is_aa64(env, 1) &&
1336              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
1337
1338    if (el == 0) {
1339        filtered = secure ? u : u != nsu;
1340    } else if (el == 1) {
1341        filtered = secure ? p : p != nsk;
1342    } else if (el == 2) {
1343        filtered = !nsh;
1344    } else { /* EL3 */
1345        filtered = m != p;
1346    }
1347
1348    if (counter != 31) {
1349        /*
1350         * If not checking PMCCNTR, ensure the counter is setup to an event we
1351         * support
1352         */
1353        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1354        if (!event_supported(event)) {
1355            return false;
1356        }
1357    }
1358
1359    return enabled && !prohibited && !filtered;
1360}
1361
1362static void pmu_update_irq(CPUARMState *env)
1363{
1364    ARMCPU *cpu = env_archcpu(env);
1365    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1366            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1367}
1368
1369/*
1370 * Ensure c15_ccnt is the guest-visible count so that operations such as
1371 * enabling/disabling the counter or filtering, modifying the count itself,
1372 * etc. can be done logically. This is essentially a no-op if the counter is
1373 * not enabled at the time of the call.
1374 */
1375static void pmccntr_op_start(CPUARMState *env)
1376{
1377    uint64_t cycles = cycles_get_count(env);
1378
1379    if (pmu_counter_enabled(env, 31)) {
1380        uint64_t eff_cycles = cycles;
1381        if (env->cp15.c9_pmcr & PMCRD) {
1382            /* Increment once every 64 processor clock cycles */
1383            eff_cycles /= 64;
1384        }
1385
1386        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1387
1388        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
1389                                 1ull << 63 : 1ull << 31;
1390        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1391            env->cp15.c9_pmovsr |= (1 << 31);
1392            pmu_update_irq(env);
1393        }
1394
1395        env->cp15.c15_ccnt = new_pmccntr;
1396    }
1397    env->cp15.c15_ccnt_delta = cycles;
1398}
1399
1400/*
1401 * If PMCCNTR is enabled, recalculate the delta between the clock and the
1402 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1403 * pmccntr_op_start.
1404 */
1405static void pmccntr_op_finish(CPUARMState *env)
1406{
1407    if (pmu_counter_enabled(env, 31)) {
1408#ifndef CONFIG_USER_ONLY
1409        /* Calculate when the counter will next overflow */
1410        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1411        if (!(env->cp15.c9_pmcr & PMCRLC)) {
1412            remaining_cycles = (uint32_t)remaining_cycles;
1413        }
1414        int64_t overflow_in = cycles_ns_per(remaining_cycles);
1415
1416        if (overflow_in > 0) {
1417            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1418                overflow_in;
1419            ARMCPU *cpu = env_archcpu(env);
1420            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1421        }
1422#endif
1423
1424        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1425        if (env->cp15.c9_pmcr & PMCRD) {
1426            /* Increment once every 64 processor clock cycles */
1427            prev_cycles /= 64;
1428        }
1429        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1430    }
1431}
1432
/*
 * Event-counter analogue of pmccntr_op_start(): make c14_pmevcntr[counter]
 * hold the guest-visible value, detecting any 32-bit overflow that has
 * occurred since the last sync point.
 */
static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
{

    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
    uint64_t count = 0;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        count = pm_events[event_idx].get_count(env);
    }

    if (pmu_counter_enabled(env, counter)) {
        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];

        /* Bit 31 going 1->0 indicates a 32-bit counter overflow */
        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
            env->cp15.c9_pmovsr |= (1 << counter);
            pmu_update_irq(env);
        }
        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
    }
    env->cp15.c14_pmevcntr_delta[counter] = count;
}
1454
/*
 * Event-counter analogue of pmccntr_op_finish(): convert the guest-visible
 * value back into a delta from the underlying count, and schedule the
 * overflow timer. Should follow every call to pmevcntr_op_start().
 */
static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
{
    if (pmu_counter_enabled(env, counter)) {
#ifndef CONFIG_USER_ONLY
        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
        uint16_t event_idx = supported_event_map[event];
        /* Events remaining until the 32-bit counter wraps to zero */
        uint64_t delta = UINT32_MAX -
            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);

        if (overflow_in > 0) {
            /* Arm the PMU timer so the overflow interrupt fires on time */
            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                overflow_in;
            ARMCPU *cpu = env_archcpu(env);
            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
        }
#endif

        env->cp15.c14_pmevcntr_delta[counter] -=
            env->cp15.c14_pmevcntr[counter];
    }
}
1477
1478void pmu_op_start(CPUARMState *env)
1479{
1480    unsigned int i;
1481    pmccntr_op_start(env);
1482    for (i = 0; i < pmu_num_counters(env); i++) {
1483        pmevcntr_op_start(env, i);
1484    }
1485}
1486
1487void pmu_op_finish(CPUARMState *env)
1488{
1489    unsigned int i;
1490    pmccntr_op_finish(env);
1491    for (i = 0; i < pmu_num_counters(env); i++) {
1492        pmevcntr_op_finish(env, i);
1493    }
1494}
1495
/*
 * EL-change hook: counter enablement/filtering depends on the current EL
 * (see pmu_counter_enabled()), so counters are synced to guest-visible
 * values before the EL change takes effect.
 */
void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_start(&cpu->env);
}
1500
/*
 * EL-change hook: re-base the counters as deltas under the new EL's
 * enable/filter configuration; pairs with pmu_pre_el_change().
 */
void pmu_post_el_change(ARMCPU *cpu, void *ignored)
{
    pmu_op_finish(&cpu->env);
}
1505
/* Callback for cpu->pmu_timer, fired when a counter is due to overflow */
void arm_pmu_timer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    /*
     * Update all the counter values based on the current underlying counts,
     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
     * has the effect of setting the cpu->pmu_timer to the next earliest time a
     * counter may expire.
     */
    pmu_op_start(&cpu->env);
    pmu_op_finish(&cpu->env);
}
1519
1520static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1521                       uint64_t value)
1522{
1523    pmu_op_start(env);
1524
1525    if (value & PMCRC) {
1526        /* The counter has been reset */
1527        env->cp15.c15_ccnt = 0;
1528    }
1529
1530    if (value & PMCRP) {
1531        unsigned int i;
1532        for (i = 0; i < pmu_num_counters(env); i++) {
1533            env->cp15.c14_pmevcntr[i] = 0;
1534        }
1535    }
1536
1537    /* only the DP, X, D and E bits are writable */
1538    env->cp15.c9_pmcr &= ~0x39;
1539    env->cp15.c9_pmcr |= (value & 0x39);
1540
1541    pmu_op_finish(env);
1542}
1543
/* Write handler for PMSWINC: bump each selected SW_INCR event counter */
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            pmevcntr_op_start(env, i);

            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            /* Bit 31 going 1->0 means the increment overflowed 32 bits */
            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;

            pmevcntr_op_finish(env, i);
        }
    }
}
1574
1575static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1576{
1577    uint64_t ret;
1578    pmccntr_op_start(env);
1579    ret = env->cp15.c15_ccnt;
1580    pmccntr_op_finish(env);
1581    return ret;
1582}
1583
/* Write handler for PMSELR / PMSELR_EL0 */
static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}
1594
/* Write handler for the full 64-bit PMCCNTR / PMCCNTR_EL0 */
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Bracket with sync so the new value becomes the next delta baseline */
    pmccntr_op_start(env);
    env->cp15.c15_ccnt = value;
    pmccntr_op_finish(env);
}
1602
1603static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1604                            uint64_t value)
1605{
1606    uint64_t cur_val = pmccntr_read(env, NULL);
1607
1608    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1609}
1610
/* AArch64 write handler for PMCCFILTR_EL0: all filter bits writable */
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Changing the filter can start/stop counting, so bracket with sync */
    pmccntr_op_start(env);
    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
    pmccntr_op_finish(env);
}
1618
1619static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1620                            uint64_t value)
1621{
1622    pmccntr_op_start(env);
1623    /* M is not accessible from AArch32 */
1624    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1625        (value & PMCCFILTR);
1626    pmccntr_op_finish(env);
1627}
1628
1629static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1630{
1631    /* M is not visible in AArch32 */
1632    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1633}
1634
1635static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1636                            uint64_t value)
1637{
1638    value &= pmu_counter_mask(env);
1639    env->cp15.c9_pmcnten |= value;
1640}
1641
1642static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1643                             uint64_t value)
1644{
1645    value &= pmu_counter_mask(env);
1646    env->cp15.c9_pmcnten &= ~value;
1647}
1648
1649static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1650                         uint64_t value)
1651{
1652    value &= pmu_counter_mask(env);
1653    env->cp15.c9_pmovsr &= ~value;
1654    pmu_update_irq(env);
1655}
1656
1657static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1658                         uint64_t value)
1659{
1660    value &= pmu_counter_mask(env);
1661    env->cp15.c9_pmovsr |= value;
1662    pmu_update_irq(env);
1663}
1664
/*
 * Common write path for PMEVTYPER<n>/PMXEVTYPER. Counter 31 aliases
 * PMCCFILTR; out-of-range counters are treated as write-ignored.
 */
static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value, const uint8_t counter)
{
    if (counter == 31) {
        pmccfiltr_write(env, ri, value);
    } else if (counter < pmu_num_counters(env)) {
        pmevcntr_op_start(env, counter);

        /*
         * If this counter's event type is changing, store the current
         * underlying count for the new type in c14_pmevcntr_delta[counter] so
         * pmevcntr_op_finish has the correct baseline when it converts back to
         * a delta.
         */
        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
            PMXEVTYPER_EVTCOUNT;
        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
        if (old_event != new_event) {
            uint64_t count = 0;
            if (event_supported(new_event)) {
                uint16_t event_idx = supported_event_map[new_event];
                count = pm_events[event_idx].get_count(env);
            }
            env->cp15.c14_pmevcntr_delta[counter] = count;
        }

        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
        pmevcntr_op_finish(env, counter);
    }
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
}
1699
1700static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1701                               const uint8_t counter)
1702{
1703    if (counter == 31) {
1704        return env->cp15.pmccfiltr_el0;
1705    } else if (counter < pmu_num_counters(env)) {
1706        return env->cp15.c14_pmevtyper[counter];
1707    } else {
1708      /*
1709       * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1710       * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1711       */
1712        return 0;
1713    }
1714}
1715
1716static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1717                              uint64_t value)
1718{
1719    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1720    pmevtyper_write(env, ri, value, counter);
1721}
1722
/* Raw (migration) write for PMEVTYPER<n>: stores the value directly */
static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    env->cp15.c14_pmevtyper[counter] = value;

    /*
     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
     * pmu_op_finish calls when loading saved state for a migration. Because
     * we're potentially updating the type of event here, the value written to
     * c14_pmevcntr_delta by the preceeding pmu_op_start call may be for a
     * different counter type. Therefore, we need to set this value to the
     * current count for the counter type we're writing so that pmu_op_finish
     * has the correct count for its calculation.
     */
    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
    if (event_supported(event)) {
        uint16_t event_idx = supported_event_map[event];
        env->cp15.c14_pmevcntr_delta[counter] =
            pm_events[event_idx].get_count(env);
    }
}
1745
1746static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1747{
1748    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1749    return pmevtyper_read(env, ri, counter);
1750}
1751
1752static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1753                             uint64_t value)
1754{
1755    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1756}
1757
1758static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1759{
1760    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1761}
1762
1763static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1764                             uint64_t value, uint8_t counter)
1765{
1766    if (counter < pmu_num_counters(env)) {
1767        pmevcntr_op_start(env, counter);
1768        env->cp15.c14_pmevcntr[counter] = value;
1769        pmevcntr_op_finish(env, counter);
1770    }
1771    /*
1772     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1773     * are CONSTRAINED UNPREDICTABLE.
1774     */
1775}
1776
1777static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1778                              uint8_t counter)
1779{
1780    if (counter < pmu_num_counters(env)) {
1781        uint64_t ret;
1782        pmevcntr_op_start(env, counter);
1783        ret = env->cp15.c14_pmevcntr[counter];
1784        pmevcntr_op_finish(env, counter);
1785        return ret;
1786    } else {
1787      /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1788       * are CONSTRAINED UNPREDICTABLE. */
1789        return 0;
1790    }
1791}
1792
1793static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1794                             uint64_t value)
1795{
1796    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1797    pmevcntr_write(env, ri, value, counter);
1798}
1799
1800static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1801{
1802    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1803    return pmevcntr_read(env, ri, counter);
1804}
1805
static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /*
     * Raw (migration/state-load) write of an event counter. Raw accesses
     * are only registered for counters that exist, hence the assert.
     *
     * NOTE(review): the direct store below is immediately followed by
     * pmevcntr_write(), which stores the same value again inside a
     * pmevcntr_op_start/finish pair. Presumably the bare store ensures the
     * raw value is in place before the op_start/finish delta accounting
     * runs — confirm against the PMU migration handling before changing.
     */
    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
    assert(counter < pmu_num_counters(env));
    env->cp15.c14_pmevcntr[counter] = value;
    pmevcntr_write(env, ri, value, counter);
}
1814
1815static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1816{
1817    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1818    assert(counter < pmu_num_counters(env));
1819    return env->cp15.c14_pmevcntr[counter];
1820}
1821
static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* PMXEVCNTR: write the event counter selected by PMSELR */
    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
}
1827
static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* PMXEVCNTR: read the event counter selected by PMSELR */
    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
}
1832
1833static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1834                            uint64_t value)
1835{
1836    if (arm_feature(env, ARM_FEATURE_V8)) {
1837        env->cp15.c9_pmuserenr = value & 0xf;
1838    } else {
1839        env->cp15.c9_pmuserenr = value & 1;
1840    }
1841}
1842
1843static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1844                             uint64_t value)
1845{
1846    /* We have no event counters so only the C bit can be changed */
1847    value &= pmu_counter_mask(env);
1848    env->cp15.c9_pminten |= value;
1849    pmu_update_irq(env);
1850}
1851
1852static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1853                             uint64_t value)
1854{
1855    value &= pmu_counter_mask(env);
1856    env->cp15.c9_pminten &= ~value;
1857    pmu_update_irq(env);
1858}
1859
1860static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1861                       uint64_t value)
1862{
1863    /* Note that even though the AArch64 view of this register has bits
1864     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1865     * architectural requirements for bits which are RES0 only in some
1866     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1867     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1868     */
1869    raw_write(env, ri, value & ~0x1FULL);
1870}
1871
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /*
     * SCR/SCR_EL3 write: build the mask of writable bits for this CPU's
     * feature set, force any RES1 bits on, then discard writes to bits
     * outside the mask.
     */
    /* Begin with base v8.0 state.  */
    uint32_t valid_mask = 0x3fff;
    ARMCPU *cpu = env_archcpu(env);

    if (arm_el_is_aa64(env, 3)) {
        value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
        valid_mask &= ~SCR_NET;
    } else {
        /* RW and ST are not writable when EL3 is AArch32 */
        valid_mask &= ~(SCR_RW | SCR_ST);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }
    /* Optional architecture extensions make further bits writable */
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= SCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= SCR_API | SCR_APK;
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
1910
1911static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1912{
1913    ARMCPU *cpu = env_archcpu(env);
1914
1915    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1916     * bank
1917     */
1918    uint32_t index = A32_BANKED_REG_GET(env, csselr,
1919                                        ri->secure & ARM_CP_SECSTATE_S);
1920
1921    return cpu->ccsidr[index];
1922}
1923
1924static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1925                         uint64_t value)
1926{
1927    raw_write(env, ri, value & 0xf);
1928}
1929
1930static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1931{
1932    CPUState *cs = env_cpu(env);
1933    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
1934    uint64_t ret = 0;
1935
1936    if (hcr_el2 & HCR_IMO) {
1937        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1938            ret |= CPSR_I;
1939        }
1940    } else {
1941        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1942            ret |= CPSR_I;
1943        }
1944    }
1945
1946    if (hcr_el2 & HCR_FMO) {
1947        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1948            ret |= CPSR_F;
1949        }
1950    } else {
1951        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1952            ret |= CPSR_F;
1953        }
1954    }
1955
1956    /* External aborts are not possible in QEMU so A bit is always clear */
1957    return ret;
1958}
1959
/*
 * Register descriptions shared by all v7 CPUs: PMU registers, cache
 * ID/selection registers, auxiliary ID/fault status registers, MAIR,
 * ISR and the 32-bit TLB invalidate operations.
 */
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow.
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .type = ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc,
      .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .writefn = pmswinc_write },
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write, },
    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .resetvalue = 0, },
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = pmreg_access_xevcntr,
      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
    /* PMUSERENR is category (b): RO in PL0, RW in PL1 */
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    /* PMINTENSET/PMINTENCLR are category (a): PL1 only */
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    /* Cache ID registers: CCSIDR is indexed by CSSELR (see ccsidr_read) */
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
     /* MAIR0/1 are defined separately from their 64-bit counterpart which
      * allows them to assign the correct fieldoffset based on the endianness
      * handled in the field definitions.
      */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
2185
/* Inner Shareable TLB invalidate operations, for v7 multiprocessor CPUs. */
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
2200
/* PMOVSSET: set-bits alias of the PMU overflow status register. */
static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
    /* PMOVSSET is not implemented in v7 before v7ve */
    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsset_write,
      .raw_writefn = raw_write },
    REGINFO_SENTINEL
};
2218
2219static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2220                        uint64_t value)
2221{
2222    value &= 1;
2223    env->teecr = value;
2224}
2225
2226static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2227                                    bool isread)
2228{
2229    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2230        return CP_ACCESS_TRAP;
2231    }
2232    return CP_ACCESS_OK;
2233}
2234
/* ThumbEE registers; TEECR gates PL0 access to TEEHBR (see teehbr_access). */
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
2245
/* Thread ID registers (TPIDR*), introduced with v6K. */
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    /* TPIDRRO_EL0/TPIDRURO is read-only from EL0/PL0, writable from PL1 */
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2277
2278#ifndef CONFIG_USER_ONLY
2279
2280static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2281                                       bool isread)
2282{
2283    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2284     * Writable only at the highest implemented exception level.
2285     */
2286    int el = arm_current_el(env);
2287
2288    switch (el) {
2289    case 0:
2290        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
2291            return CP_ACCESS_TRAP;
2292        }
2293        break;
2294    case 1:
2295        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2296            arm_is_secure_below_el3(env)) {
2297            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2298            return CP_ACCESS_TRAP_UNCATEGORIZED;
2299        }
2300        break;
2301    case 2:
2302    case 3:
2303        break;
2304    }
2305
2306    if (!isread && el < arm_highest_el(env)) {
2307        return CP_ACCESS_TRAP_UNCATEGORIZED;
2308    }
2309
2310    return CP_ACCESS_OK;
2311}
2312
2313static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2314                                        bool isread)
2315{
2316    unsigned int cur_el = arm_current_el(env);
2317    bool secure = arm_is_secure(env);
2318
2319    /* CNT[PV]CT: not visible from PL0 if ELO[PV]CTEN is zero */
2320    if (cur_el == 0 &&
2321        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2322        return CP_ACCESS_TRAP;
2323    }
2324
2325    if (arm_feature(env, ARM_FEATURE_EL2) &&
2326        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
2327        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
2328        return CP_ACCESS_TRAP_EL2;
2329    }
2330    return CP_ACCESS_OK;
2331}
2332
2333static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2334                                      bool isread)
2335{
2336    unsigned int cur_el = arm_current_el(env);
2337    bool secure = arm_is_secure(env);
2338
2339    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
2340     * EL0[PV]TEN is zero.
2341     */
2342    if (cur_el == 0 &&
2343        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2344        return CP_ACCESS_TRAP;
2345    }
2346
2347    if (arm_feature(env, ARM_FEATURE_EL2) &&
2348        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
2349        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2350        return CP_ACCESS_TRAP_EL2;
2351    }
2352    return CP_ACCESS_OK;
2353}
2354
static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    /* Access check for the physical counter registers (CNTPCT) */
    return gt_counter_access(env, GTIMER_PHYS, isread);
}
2361
static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri,
                                    bool isread)
{
    /* Access check for the virtual counter registers (CNTVCT) */
    return gt_counter_access(env, GTIMER_VIRT, isread);
}
2368
static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* Access check for the physical timer registers (CNTP_*) */
    return gt_timer_access(env, GTIMER_PHYS, isread);
}
2374
static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                       bool isread)
{
    /* Access check for the virtual timer registers (CNTV_*) */
    return gt_timer_access(env, GTIMER_VIRT, isread);
}
2380
2381static CPAccessResult gt_stimer_access(CPUARMState *env,
2382                                       const ARMCPRegInfo *ri,
2383                                       bool isread)
2384{
2385    /* The AArch64 register view of the secure physical timer is
2386     * always accessible from EL3, and configurably accessible from
2387     * Secure EL1.
2388     */
2389    switch (arm_current_el(env)) {
2390    case 1:
2391        if (!arm_is_secure(env)) {
2392            return CP_ACCESS_TRAP;
2393        }
2394        if (!(env->cp15.scr_el3 & SCR_ST)) {
2395            return CP_ACCESS_TRAP_EL3;
2396        }
2397        return CP_ACCESS_OK;
2398    case 0:
2399    case 2:
2400        return CP_ACCESS_TRAP;
2401    case 3:
2402        return CP_ACCESS_OK;
2403    default:
2404        g_assert_not_reached();
2405    }
2406}
2407
2408static uint64_t gt_get_countervalue(CPUARMState *env)
2409{
2410    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
2411}
2412
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    /*
     * Recompute one generic timer's state after its count, compare value,
     * offset or control bits may have changed: update ISTATUS (CNT*_CTL
     * bit 2), drive the IRQ output, and (re)program the backing QEMU
     * timer for the next point at which ISTATUS can change.
     */
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t offset = timeridx == GTIMER_VIRT ?
                                      cpu->env.cp15.cntvoff_el2 : 0;
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count - offset >= gt->cval;
        uint64_t nexttick;
        int irqstate;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);

        /* IRQ asserted when ISTATUS is set and IMASK (ctl bit 1) clear */
        irqstate = (istatus && !(gt->ctl & 2));
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);

        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval + offset;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
        trace_arm_gt_recalc_disabled(timeridx);
    }
}
2459
2460static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2461                           int timeridx)
2462{
2463    ARMCPU *cpu = env_archcpu(env);
2464
2465    timer_del(cpu->gt_timer[timeridx]);
2466}
2467
static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CNTPCT: return the current physical counter value */
    return gt_get_countervalue(env);
}
2472
2473static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2474{
2475    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
2476}
2477
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    /* Set a timer's compare value, then reprogram the backing QEMU timer */
    trace_arm_gt_cval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(env_archcpu(env), timeridx);
}
2486
2487static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2488                             int timeridx)
2489{
2490    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
2491
2492    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2493                      (gt_get_countervalue(env) - offset));
2494}
2495
/*
 * Write handler for CNT*_TVAL: the written value is a signed 32-bit
 * count relative to "now" (hence the sextract64), which we convert
 * into an absolute CVAL (applying CNTVOFF_EL2 for the virtual timer)
 * before recomputing the deadline.
 */
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          int timeridx,
                          uint64_t value)
{
    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;

    trace_arm_gt_tval_write(timeridx, value);
    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(env_archcpu(env), timeridx);
}
2507
/*
 * Write handler for the CNT*_CTL control registers. Only bits [1:0]
 * (ENABLE and IMASK) are written here; bit 2 (ISTATUS) is read-only
 * and maintained by gt_recalc_timer().
 */
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    /* Deposit only the two writable low bits into the stored CTL */
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
2530
/*
 * Wrappers binding the shared gt_* handlers to the physical timer
 * (GTIMER_PHYS) for use in the cpreg tables below.
 */
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}
2558
/*
 * Wrappers binding the shared gt_* handlers to the virtual timer
 * (GTIMER_VIRT) for use in the cpreg tables below.
 */
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
2586
/*
 * Write handler for CNTVOFF_EL2: changing the virtual offset moves
 * the virtual timer's effective deadline, so recompute it.
 */
static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);

    trace_arm_gt_cntvoff_write(value);
    raw_write(env, ri, value);
    gt_recalc_timer(cpu, GTIMER_VIRT);
}
2596
/*
 * Wrappers binding the shared gt_* handlers to the EL2 (hyp) timer
 * (GTIMER_HYP) for use in the cpreg tables.
 */
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}
2624
/*
 * Wrappers binding the shared gt_* handlers to the secure timer
 * (GTIMER_SEC) for use in the cpreg tables.
 */
static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
2652
/*
 * QEMUTimer expiry callbacks, one per timer: when the host timer
 * fires we recalculate ISTATUS, the interrupt line and the next
 * deadline for that timer.
 */
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
2680
/*
 * System-emulation cpreg table for the generic timers: CNTFRQ/CNTKCTL,
 * per-timer CTL/TVAL/CVAL views, and the CNTPCT/CNTVCT counters.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      /* 1GHz nominal counter divided down by GTIMER_SCALE */
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL0_RW,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL0_RW,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
2865
2866#else
2867
2868/* In user-mode most of the generic timer registers are inaccessible
2869 * however modern kernels (4.12+) allow access to cntvct_el0
2870 */
2871
/* User-mode read handler for CNTVCT_EL0 (no CNTVOFF applied here). */
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / GTIMER_SCALE;
}
2880
/*
 * User-mode cpreg table: expose only CNTFRQ_EL0 (constant) and
 * CNTVCT_EL0, matching what Linux kernels 4.12+ permit at EL0.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};
2895
2896#endif
2897
2898static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2899{
2900    if (arm_feature(env, ARM_FEATURE_LPAE)) {
2901        raw_write(env, ri, value);
2902    } else if (arm_feature(env, ARM_FEATURE_V7)) {
2903        raw_write(env, ri, value & 0xfffff6ff);
2904    } else {
2905        raw_write(env, ri, value & 0xfffff1ff);
2906    }
2907}
2908
2909#ifndef CONFIG_USER_ONLY
2910/* get_phys_addr() isn't present for user-mode-only targets */
2911
2912static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2913                                 bool isread)
2914{
2915    if (ri->opc2 & 4) {
2916        /* The ATS12NSO* operations must trap to EL3 if executed in
2917         * Secure EL1 (which can only happen if EL3 is AArch64).
2918         * They are simply UNDEF if executed from NS EL1.
2919         * They function normally from EL2 or EL3.
2920         */
2921        if (arm_current_el(env) == 1) {
2922            if (arm_is_secure_below_el3(env)) {
2923                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2924            }
2925            return CP_ACCESS_TRAP_UNCATEGORIZED;
2926        }
2927    }
2928    return CP_ACCESS_OK;
2929}
2930
/*
 * Perform an address translation for @value via @mmu_idx and return
 * the result encoded as a PAR value, using either the 64-bit LPAE
 * PAR format or the 32-bit short format depending on the state of
 * the translating regime.
 */
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;       /* true on translation fault, false on success */
    uint64_t par64;
    bool format64 = false;
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            /* Translation failed: encode the long-format fault status */
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            /* 16MB supersections get a distinct encoding on v7+ */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
3022
/*
 * Write handler for the AArch32 ATS* address translation operations:
 * decode opc2 and the current EL into an MMU index, perform the
 * translation and store the result into the banked PAR.
 */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* Odd opc2 values are the "W" (write-access) variants */
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
3080
3081static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3082                        uint64_t value)
3083{
3084    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3085    uint64_t par64;
3086
3087    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);
3088
3089    A32_BANKED_CURRENT_REG_SET(env, par, par64);
3090}
3091
3092static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3093                                     bool isread)
3094{
3095    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
3096        return CP_ACCESS_TRAP;
3097    }
3098    return CP_ACCESS_OK;
3099}
3100
/*
 * Write handler for the AArch64 AT S1Ex and S12Ex operations: decode
 * opc1/opc2 into an MMU index, do the translation, and deposit the
 * result in PAR_EL1.
 */
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Odd opc2 values are the "W" (write-access) variants */
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
3139#endif
3140
/* cpregs for the VA-to-PA (VAPA) feature: PAR plus the ATS operations. */
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
3155
/*
 * Return basic MPU access permission bits: compress the extended
 * format (one field per region every 4 bits, low 2 bits significant)
 * into the simple format of packed 2-bit fields.
 */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t result = 0;
    int region;

    for (region = 0; region < 8; region++) {
        result |= ((val >> (region * 4)) & 3) << (region * 2);
    }
    return result;
}
3170
/*
 * Pad basic MPU access permission bits to the extended format:
 * spread each packed 2-bit field out to its own 4-bit slot.
 */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t result = 0;
    int region;

    for (region = 0; region < 8; region++) {
        result |= ((val >> (region * 2)) & 3) << (region * 4);
    }
    return result;
}
3185
/*
 * PMSAv5 access-permission registers: stored internally in the
 * extended format; the simple (DATA_AP/INSN_AP) views convert on
 * read and write via the helpers above.
 */
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
}
3207
3208static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3209{
3210    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3211
3212    if (!u32p) {
3213        return 0;
3214    }
3215
3216    u32p += env->pmsav7.rnr[M_REG_NS];
3217    return *u32p;
3218}
3219
3220static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3221                         uint64_t value)
3222{
3223    ARMCPU *cpu = env_archcpu(env);
3224    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3225
3226    if (!u32p) {
3227        return;
3228    }
3229
3230    u32p += env->pmsav7.rnr[M_REG_NS];
3231    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3232    *u32p = value;
3233}
3234
3235static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3236                              uint64_t value)
3237{
3238    ARMCPU *cpu = env_archcpu(env);
3239    uint32_t nrgs = cpu->pmsav7_dregion;
3240
3241    if (value >= nrgs) {
3242        qemu_log_mask(LOG_GUEST_ERROR,
3243                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3244                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
3245        return;
3246    }
3247
3248    raw_write(env, ri, value);
3249}
3250
/* cpregs for the PMSAv7 MPU: per-region DRBAR/DRSR/DRACR plus RGNR. */
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
3278
/*
 * cpregs for the PMSAv5 MPU: access-permission registers (simple and
 * extended views), cache-configuration registers, and the ARM946
 * protection region base/size registers.
 */
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
3329
/*
 * Raw write of TTBCR/TCR: clear bits that are UNK/SBZP for the CPU's
 * architecture level, store the value, and refresh the cached TTBR
 * select masks derived from TTBCR.N.
 */
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3); /* TTBCR.N */

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
3362
3363static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3364                             uint64_t value)
3365{
3366    ARMCPU *cpu = env_archcpu(env);
3367    TCR *tcr = raw_ptr(env, ri);
3368
3369    if (arm_feature(env, ARM_FEATURE_LPAE)) {
3370        /* With LPAE the TTBCR could result in a change of ASID
3371         * via the TTBCR.A1 bit, so do a TLB flush.
3372         */
3373        tlb_flush(CPU(cpu));
3374    }
3375    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
3376    value = deposit64(tcr->raw_tcr, 0, 32, value);
3377    vmsa_ttbcr_raw_write(env, ri, value);
3378}
3379
3380static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3381{
3382    TCR *tcr = raw_ptr(env, ri);
3383
3384    /* Reset both the TCR as well as the masks corresponding to the bank of
3385     * the TCR being reset.
3386     */
3387    tcr->raw_tcr = 0;
3388    tcr->mask = 0;
3389    tcr->base_mask = 0xffffc000u;
3390}
3391
3392static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3393                               uint64_t value)
3394{
3395    ARMCPU *cpu = env_archcpu(env);
3396    TCR *tcr = raw_ptr(env, ri);
3397
3398    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
3399    tlb_flush(CPU(cpu));
3400    tcr->raw_tcr = value;
3401}
3402
3403static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3404                            uint64_t value)
3405{
3406    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
3407    if (cpreg_field_is_64bit(ri) &&
3408        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
3409        ARMCPU *cpu = env_archcpu(env);
3410        tlb_flush(CPU(cpu));
3411    }
3412    raw_write(env, ri, value);
3413}
3414
3415static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3416                        uint64_t value)
3417{
3418    ARMCPU *cpu = env_archcpu(env);
3419    CPUState *cs = CPU(cpu);
3420
3421    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
3422    if (raw_read(env, ri) != value) {
3423        tlb_flush_by_mmuidx(cs,
3424                            ARMMMUIdxBit_S12NSE1 |
3425                            ARMMMUIdxBit_S12NSE0 |
3426                            ARMMMUIdxBit_S2NS);
3427        raw_write(env, ri, value);
3428    }
3429}
3430
/*
 * Fault status/address registers shared by the VMSA and PMSA register
 * sets: banked DFSR/IFSR/DFAR plus the AArch64 FAR_EL1 view.
 */
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    /* DFSR is ARM_CP_ALIAS: state lives in the banked dfsr fields. */
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
3450
/*
 * VMSA translation-control registers: ESR_EL1, the banked TTBR0/TTBR1
 * and TCR_EL1/TTBCR.  TTBR writes go through vmsa_ttbr_write so that an
 * ASID change triggers a TLB flush.
 */
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    /* AArch32 TTBCR aliases the low 32 bits of the banked TCR state. */
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
3478
/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
 * qemu tlbs nor adjusting cached masks.
 * TTBCR2 is the AArch32 view of the high 32 bits of the banked TCR state.
 */
static const ARMCPRegInfo ttbcr2_reginfo = {
    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
    .access = PL1_RW, .type = ARM_CP_ALIAS,
    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
};
3488
3489static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
3490                                uint64_t value)
3491{
3492    env->cp15.c15_ticonfig = value & 0xe7;
3493    /* The OS_TYPE bit in this register changes the reported CPUID! */
3494    env->cp15.c0_cpuid = (value & (1 << 5)) ?
3495        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
3496}
3497
3498static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
3499                                uint64_t value)
3500{
3501    env->cp15.c15_threadid = value & 0xffff;
3502}
3503
3504static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
3505                           uint64_t value)
3506{
3507    /* Wait-for-interrupt (deprecated) */
3508    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
3509}
3510
/* Handler for OMAP cache maintenance operations (which are NOPs here). */
static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
3520
/*
 * TI OMAP (TI915T/TI925T) implementation-defined cp15 registers:
 * overridden DFSR, TICONFIG, dirty-line index registers, thread ID,
 * WFI-via-cp15 and cache maintenance ops.
 */
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
3560
3561static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3562                              uint64_t value)
3563{
3564    env->cp15.c15_cpar = value & 0x3fff;
3565}
3566
/* XScale-specific cp15 registers: CPAR, AUXCR and cache-lockdown NOPs. */
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
3593
/* Catch-all RAZ/WI entry for the implementation-defined crn=15 space. */
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
3607
/* Cache Dirty Status Register for cores that expose one. */
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
3615
/* Block-transfer cache operations: status reads as idle, ops are NOPs. */
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
3636
/* Test-and-clean cache maintenance registers. */
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
3649
/* StrongARM-specific cp15 registers. */
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
3658
3659static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3660{
3661    ARMCPU *cpu = env_archcpu(env);
3662    unsigned int cur_el = arm_current_el(env);
3663    bool secure = arm_is_secure(env);
3664
3665    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3666        return env->cp15.vpidr_el2;
3667    }
3668    return raw_read(env, ri);
3669}
3670
3671static uint64_t mpidr_read_val(CPUARMState *env)
3672{
3673    ARMCPU *cpu = env_archcpu(env);
3674    uint64_t mpidr = cpu->mp_affinity;
3675
3676    if (arm_feature(env, ARM_FEATURE_V7MP)) {
3677        mpidr |= (1U << 31);
3678        /* Cores which are uniprocessor (non-coherent)
3679         * but still implement the MP extensions set
3680         * bit 30. (For instance, Cortex-R5).
3681         */
3682        if (cpu->mp_is_up) {
3683            mpidr |= (1u << 30);
3684        }
3685    }
3686    return mpidr;
3687}
3688
3689static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3690{
3691    unsigned int cur_el = arm_current_el(env);
3692    bool secure = arm_is_secure(env);
3693
3694    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3695        return env->cp15.vmpidr_el2;
3696    }
3697    return mpidr_read_val(env);
3698}
3699
/*
 * LPAE registers: NOPed AMAIR0/1, plus the 64-bit (mcrr/mrrc) views of
 * PAR and TTBR0/TTBR1.
 */
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    /* 64-bit TTBR accesses share state and writefn with the 32-bit views. */
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
3726
/* Read handler for the AArch64 FPCR: delegate to the VFP helpers. */
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}
3731
/* Write handler for the AArch64 FPCR: delegate to the VFP helpers. */
static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}
3737
/* Read handler for the AArch64 FPSR: delegate to the VFP helpers. */
static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}
3742
/* Write handler for the AArch64 FPSR: delegate to the VFP helpers. */
static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}
3748
3749static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
3750                                       bool isread)
3751{
3752    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
3753        return CP_ACCESS_TRAP;
3754    }
3755    return CP_ACCESS_OK;
3756}
3757
/* Write DAIF: only the D/A/I/F mask bits are stored. */
static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->daif = value & PSTATE_DAIF;
}
3763
3764static CPAccessResult aa64_cacheop_access(CPUARMState *env,
3765                                          const ARMCPRegInfo *ri,
3766                                          bool isread)
3767{
3768    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
3769     * SCTLR_EL1.UCI is set.
3770     */
3771    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
3772        return CP_ACCESS_TRAP;
3773    }
3774    return CP_ACCESS_OK;
3775}
3776
3777/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
3778 * Page D4-1736 (DDI0487A.b)
3779 */
3780
3781static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3782                                      uint64_t value)
3783{
3784    CPUState *cs = env_cpu(env);
3785    bool sec = arm_is_secure_below_el3(env);
3786
3787    if (sec) {
3788        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3789                                            ARMMMUIdxBit_S1SE1 |
3790                                            ARMMMUIdxBit_S1SE0);
3791    } else {
3792        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3793                                            ARMMMUIdxBit_S12NSE1 |
3794                                            ARMMMUIdxBit_S12NSE0);
3795    }
3796}
3797
3798static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3799                                    uint64_t value)
3800{
3801    CPUState *cs = env_cpu(env);
3802
3803    if (tlb_force_broadcast(env)) {
3804        tlbi_aa64_vmalle1is_write(env, NULL, value);
3805        return;
3806    }
3807
3808    if (arm_is_secure_below_el3(env)) {
3809        tlb_flush_by_mmuidx(cs,
3810                            ARMMMUIdxBit_S1SE1 |
3811                            ARMMMUIdxBit_S1SE0);
3812    } else {
3813        tlb_flush_by_mmuidx(cs,
3814                            ARMMMUIdxBit_S12NSE1 |
3815                            ARMMMUIdxBit_S12NSE0);
3816    }
3817}
3818
3819static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3820                                  uint64_t value)
3821{
3822    /* Note that the 'ALL' scope must invalidate both stage 1 and
3823     * stage 2 translations, whereas most other scopes only invalidate
3824     * stage 1 translations.
3825     */
3826    ARMCPU *cpu = env_archcpu(env);
3827    CPUState *cs = CPU(cpu);
3828
3829    if (arm_is_secure_below_el3(env)) {
3830        tlb_flush_by_mmuidx(cs,
3831                            ARMMMUIdxBit_S1SE1 |
3832                            ARMMMUIdxBit_S1SE0);
3833    } else {
3834        if (arm_feature(env, ARM_FEATURE_EL2)) {
3835            tlb_flush_by_mmuidx(cs,
3836                                ARMMMUIdxBit_S12NSE1 |
3837                                ARMMMUIdxBit_S12NSE0 |
3838                                ARMMMUIdxBit_S2NS);
3839        } else {
3840            tlb_flush_by_mmuidx(cs,
3841                                ARMMMUIdxBit_S12NSE1 |
3842                                ARMMMUIdxBit_S12NSE0);
3843        }
3844    }
3845}
3846
3847static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3848                                  uint64_t value)
3849{
3850    ARMCPU *cpu = env_archcpu(env);
3851    CPUState *cs = CPU(cpu);
3852
3853    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
3854}
3855
3856static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3857                                  uint64_t value)
3858{
3859    ARMCPU *cpu = env_archcpu(env);
3860    CPUState *cs = CPU(cpu);
3861
3862    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
3863}
3864
3865static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3866                                    uint64_t value)
3867{
3868    /* Note that the 'ALL' scope must invalidate both stage 1 and
3869     * stage 2 translations, whereas most other scopes only invalidate
3870     * stage 1 translations.
3871     */
3872    CPUState *cs = env_cpu(env);
3873    bool sec = arm_is_secure_below_el3(env);
3874    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
3875
3876    if (sec) {
3877        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3878                                            ARMMMUIdxBit_S1SE1 |
3879                                            ARMMMUIdxBit_S1SE0);
3880    } else if (has_el2) {
3881        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3882                                            ARMMMUIdxBit_S12NSE1 |
3883                                            ARMMMUIdxBit_S12NSE0 |
3884                                            ARMMMUIdxBit_S2NS);
3885    } else {
3886          tlb_flush_by_mmuidx_all_cpus_synced(cs,
3887                                              ARMMMUIdxBit_S12NSE1 |
3888                                              ARMMMUIdxBit_S12NSE0);
3889    }
3890}
3891
3892static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3893                                    uint64_t value)
3894{
3895    CPUState *cs = env_cpu(env);
3896
3897    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
3898}
3899
3900static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3901                                    uint64_t value)
3902{
3903    CPUState *cs = env_cpu(env);
3904
3905    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
3906}
3907
3908static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3909                                 uint64_t value)
3910{
3911    /* Invalidate by VA, EL2
3912     * Currently handles both VAE2 and VALE2, since we don't support
3913     * flush-last-level-only.
3914     */
3915    ARMCPU *cpu = env_archcpu(env);
3916    CPUState *cs = CPU(cpu);
3917    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3918
3919    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
3920}
3921
3922static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3923                                 uint64_t value)
3924{
3925    /* Invalidate by VA, EL3
3926     * Currently handles both VAE3 and VALE3, since we don't support
3927     * flush-last-level-only.
3928     */
3929    ARMCPU *cpu = env_archcpu(env);
3930    CPUState *cs = CPU(cpu);
3931    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3932
3933    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
3934}
3935
3936static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3937                                   uint64_t value)
3938{
3939    ARMCPU *cpu = env_archcpu(env);
3940    CPUState *cs = CPU(cpu);
3941    bool sec = arm_is_secure_below_el3(env);
3942    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3943
3944    if (sec) {
3945        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3946                                                 ARMMMUIdxBit_S1SE1 |
3947                                                 ARMMMUIdxBit_S1SE0);
3948    } else {
3949        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3950                                                 ARMMMUIdxBit_S12NSE1 |
3951                                                 ARMMMUIdxBit_S12NSE0);
3952    }
3953}
3954
3955static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3956                                 uint64_t value)
3957{
3958    /* Invalidate by VA, EL1&0 (AArch64 version).
3959     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
3960     * since we don't support flush-for-specific-ASID-only or
3961     * flush-last-level-only.
3962     */
3963    ARMCPU *cpu = env_archcpu(env);
3964    CPUState *cs = CPU(cpu);
3965    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3966
3967    if (tlb_force_broadcast(env)) {
3968        tlbi_aa64_vae1is_write(env, NULL, value);
3969        return;
3970    }
3971
3972    if (arm_is_secure_below_el3(env)) {
3973        tlb_flush_page_by_mmuidx(cs, pageaddr,
3974                                 ARMMMUIdxBit_S1SE1 |
3975                                 ARMMMUIdxBit_S1SE0);
3976    } else {
3977        tlb_flush_page_by_mmuidx(cs, pageaddr,
3978                                 ARMMMUIdxBit_S12NSE1 |
3979                                 ARMMMUIdxBit_S12NSE0);
3980    }
3981}
3982
3983static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3984                                   uint64_t value)
3985{
3986    CPUState *cs = env_cpu(env);
3987    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3988
3989    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3990                                             ARMMMUIdxBit_S1E2);
3991}
3992
3993static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3994                                   uint64_t value)
3995{
3996    CPUState *cs = env_cpu(env);
3997    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3998
3999    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
4000                                             ARMMMUIdxBit_S1E3);
4001}
4002
4003static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4004                                    uint64_t value)
4005{
4006    /* Invalidate by IPA. This has to invalidate any structures that
4007     * contain only stage 2 translation information, but does not need
4008     * to apply to structures that contain combined stage 1 and stage 2
4009     * translation information.
4010     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
4011     */
4012    ARMCPU *cpu = env_archcpu(env);
4013    CPUState *cs = CPU(cpu);
4014    uint64_t pageaddr;
4015
4016    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
4017        return;
4018    }
4019
4020    pageaddr = sextract64(value << 12, 0, 48);
4021
4022    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
4023}
4024
4025static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4026                                      uint64_t value)
4027{
4028    CPUState *cs = env_cpu(env);
4029    uint64_t pageaddr;
4030
4031    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
4032        return;
4033    }
4034
4035    pageaddr = sextract64(value << 12, 0, 48);
4036
4037    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
4038                                             ARMMMUIdxBit_S2NS);
4039}
4040
4041static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
4042                                      bool isread)
4043{
4044    /* We don't implement EL2, so the only control on DC ZVA is the
4045     * bit in the SCTLR which can prohibit access for EL0.
4046     */
4047    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
4048        return CP_ACCESS_TRAP;
4049    }
4050    return CP_ACCESS_OK;
4051}
4052
4053static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
4054{
4055    ARMCPU *cpu = env_archcpu(env);
4056    int dzp_bit = 1 << 4;
4057
4058    /* DZP indicates whether DC ZVA access is allowed */
4059    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
4060        dzp_bit = 0;
4061    }
4062    return cpu->dcz_blocksize | dzp_bit;
4063}
4064
4065static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4066                                    bool isread)
4067{
4068    if (!(env->pstate & PSTATE_SP)) {
4069        /* Access to SP_EL0 is undefined if it's being used as
4070         * the stack pointer.
4071         */
4072        return CP_ACCESS_TRAP_UNCATEGORIZED;
4073    }
4074    return CP_ACCESS_OK;
4075}
4076
/* Read SPSel: report the PSTATE.SP bit. */
static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}
4081
/* Write SPSel: delegate stack-pointer selection to update_spsel(). */
static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
4086
4087static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4088                        uint64_t value)
4089{
4090    ARMCPU *cpu = env_archcpu(env);
4091
4092    if (raw_read(env, ri) == value) {
4093        /* Skip the TLB flush if nothing actually changed; Linux likes
4094         * to do a lot of pointless SCTLR writes.
4095         */
4096        return;
4097    }
4098
4099    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4100        /* M bit is RAZ/WI for PMSA with no MPU implemented */
4101        value &= ~SCTLR_M;
4102    }
4103
4104    raw_write(env, ri, value);
4105    /* ??? Lots of these bits are not implemented.  */
4106    /* This may enable/disable the MMU, so do a TLB flush.  */
4107    tlb_flush(CPU(cpu));
4108}
4109
4110static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
4111                                     bool isread)
4112{
4113    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
4114        return CP_ACCESS_TRAP_FP_EL2;
4115    }
4116    if (env->cp15.cptr_el[3] & CPTR_TFP) {
4117        return CP_ACCESS_TRAP_FP_EL3;
4118    }
4119    return CP_ACCESS_OK;
4120}
4121
/*
 * SDCR is the AArch32 view of MDCR_EL3; only the architecturally valid
 * bits are kept on write.
 */
static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
4127
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
      .type = ARM_CP_NO_RAW,
      .access = PL0_RW, .accessfn = aa64_daif_access,
      .fieldoffset = offsetof(CPUARMState, daif),
      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_NO_RAW,
      .readfn = aa64_dczid_read },
    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_DC_ZVA,
#ifndef CONFIG_USER_ONLY
      /* Avoid overhead of an access check that always passes in user-mode */
      .accessfn = aa64_zva_access,
#endif
    },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1is_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1is_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vmalle1_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae1_write },
    /* EL2-visible TLBI ops (stage 2 and combined stage 1+2) */
    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1is_write },
    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_ipas2e1_write },
    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1_write },
    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
    /* 64 bit address translation operations */
    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
      .writefn = par_write },
#endif
    /* TLB invalidate last level of translation table walk */
    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVALHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBIIPAS2",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2IS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    { .name = "TLBIIPAS2L",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_write },
    { .name = "TLBIIPAS2LIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiipas2_is_write },
    /* 32 bit cache operations */
    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
      .type = ARM_CP_NOP, .access = PL1_W },
    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .type = ARM_CP_NOP, .access = PL1_W },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* AArch64 EL1 exception state; SPSR_EL1 maps onto the AArch32 SPSR_svc */
    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
    /* We rely on the access checks not allowing the guest to write to the
     * state field when SPSel indicates that it's being used as the stack
     * pointer.
     */
    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = sp_el0_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
    /* AArch64 views of AArch32-only state (FPEXC, DACR, IFSR, banked SPSRs) */
    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
      .access = PL2_RW, .accessfn = fpexc32_access },
    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
      .resetvalue = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
    { .name = "SDCR", .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = sdcr_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
    REGINFO_SENTINEL
};
4458
4459/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
4460static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
4461    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4462      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4463      .access = PL2_RW,
4464      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
4465    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
4466      .type = ARM_CP_NO_RAW,
4467      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4468      .access = PL2_RW,
4469      .type = ARM_CP_CONST, .resetvalue = 0 },
4470    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4471      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4472      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4473    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4474      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4475      .access = PL2_RW,
4476      .type = ARM_CP_CONST, .resetvalue = 0 },
4477    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4478      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4479      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4480    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4481      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4482      .access = PL2_RW, .type = ARM_CP_CONST,
4483      .resetvalue = 0 },
4484    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4485      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4486      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4487    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4488      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4489      .access = PL2_RW, .type = ARM_CP_CONST,
4490      .resetvalue = 0 },
4491    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4492      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4493      .access = PL2_RW, .type = ARM_CP_CONST,
4494      .resetvalue = 0 },
4495    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4496      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4497      .access = PL2_RW, .type = ARM_CP_CONST,
4498      .resetvalue = 0 },
4499    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4500      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4501      .access = PL2_RW, .type = ARM_CP_CONST,
4502      .resetvalue = 0 },
4503    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4504      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4505      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4506    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
4507      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4508      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4509      .type = ARM_CP_CONST, .resetvalue = 0 },
4510    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4511      .cp = 15, .opc1 = 6, .crm = 2,
4512      .access = PL2_RW, .accessfn = access_el3_aa32ns,
4513      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
4514    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4515      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4516      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4517    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4518      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4519      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4520    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4521      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4522      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4523    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4524      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4525      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4526    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4527      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4528      .resetvalue = 0 },
4529    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4530      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4531      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4532    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4533      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4534      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4535    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4536      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4537      .resetvalue = 0 },
4538    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4539      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4540      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4541    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4542      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4543      .resetvalue = 0 },
4544    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4545      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4546      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4547    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4548      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4549      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4550    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4551      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4552      .access = PL2_RW, .accessfn = access_tda,
4553      .type = ARM_CP_CONST, .resetvalue = 0 },
4554    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
4555      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4556      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4557      .type = ARM_CP_CONST, .resetvalue = 0 },
4558    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4559      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4560      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4561    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4562      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4563      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4564    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4565      .type = ARM_CP_CONST,
4566      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4567      .access = PL2_RW, .resetvalue = 0 },
4568    REGINFO_SENTINEL
4569};
4570
4571/* Ditto, but for registers which exist in ARMv8 but not v7 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    /* HCR2 (the AArch32 view of HCR_EL2[63:32]) is constant zero when
     * EL2 is not implemented.
     */
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
4579
/*
 * Write handler for HCR_EL2 (also reached via the HCR/HCR2 32-bit write
 * handlers): masks out bits that are RES0 for this CPU configuration,
 * flushes the TLB when translation-affecting bits change, and
 * re-evaluates pending virtual interrupts after VI/VF updates.
 */
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = env_archcpu(env);
    uint64_t valid_mask = HCR_MASK;

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        valid_mask &= ~HCR_HCD;
    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
         * However, if we're using the SMC PSCI conduit then QEMU is
         * effectively acting like EL3 firmware and so the guest at
         * EL2 should retain the ability to prevent EL1 from being
         * able to make SMC calls into the ersatz firmware, so in
         * that case HCR.TSC should be read/write.
         */
        valid_mask &= ~HCR_TSC;
    }
    /* Optional-feature trap bits only exist when the feature does */
    if (cpu_isar_feature(aa64_lor, cpu)) {
        valid_mask |= HCR_TLOR;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        valid_mask |= HCR_API | HCR_APK;
    }

    /* Clear RES0 bits.  */
    value &= valid_mask;

    /* These bits change the MMU setup:
     * HCR_VM enables stage 2 translation
     * HCR_PTW forbids certain page-table setups
     * HCR_DC Disables stage1 and enables stage2 translation
     */
    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
        tlb_flush(CPU(cpu));
    }
    env->cp15.hcr_el2 = value;

    /*
     * Updates to VI and VF require us to update the status of
     * virtual interrupts, which are the logical OR of these bits
     * and the state of the input lines from the GIC. (This requires
     * that we have the iothread lock, which is done by marking the
     * reginfo structs as ARM_CP_IO.)
     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
     * possible for it to be taken immediately, because VIRQ and
     * VFIQ are masked unless running at EL0 or EL1, and HCR
     * can only be written at EL2.
     */
    g_assert(qemu_mutex_iothread_locked());
    arm_cpu_update_virq(cpu);
    arm_cpu_update_vfiq(cpu);
}
4632
4633static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
4634                          uint64_t value)
4635{
4636    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
4637    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
4638    hcr_write(env, NULL, value);
4639}
4640
4641static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
4642                         uint64_t value)
4643{
4644    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
4645    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
4646    hcr_write(env, NULL, value);
4647}
4648
/*
 * Return the effective value of HCR_EL2, i.e. the raw register value
 * with the overrides implied by the current state (secure, TGE, E2H)
 * already applied.
 * Bits that are not included here:
 * RW       (read from SCR_EL3.RW as needed)
 */
uint64_t arm_hcr_el2_eff(CPUARMState *env)
{
    uint64_t ret = env->cp15.hcr_el2;

    if (arm_is_secure_below_el3(env)) {
        /*
         * "This register has no effect if EL2 is not enabled in the
         * current Security state".  This is ARMv8.4-SecEL2 speak for
         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
         *
         * Prior to that, the language was "In an implementation that
         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
         * as if this field is 0 for all purposes other than a direct
         * read or write access of HCR_EL2".  With lots of enumeration
         * on a per-field basis.  In current QEMU, this is condition
         * is arm_is_secure_below_el3.
         *
         * Since the v8.4 language applies to the entire register, and
         * appears to be backward compatible, use that.
         */
        ret = 0;
    } else if (ret & HCR_TGE) {
        /* These bits are up-to-date as of ARMv8.4.  */
        if (ret & HCR_E2H) {
            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
        } else {
            /* TGE without E2H: FMO/IMO/AMO behave as if set to 1 */
            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
        }
        /* These bits behave as 0 whenever TGE is set */
        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
                 HCR_TLOR);
    }

    return ret;
}
4693
4694static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4695                           uint64_t value)
4696{
4697    /*
4698     * For A-profile AArch32 EL3, if NSACR.CP10
4699     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
4700     */
4701    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
4702        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
4703        value &= ~(0x3 << 10);
4704        value |= env->cp15.cptr_el[2] & (0x3 << 10);
4705    }
4706    env->cp15.cptr_el[2] = value;
4707}
4708
4709static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
4710{
4711    /*
4712     * For A-profile AArch32 EL3, if NSACR.CP10
4713     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
4714     */
4715    uint64_t value = env->cp15.cptr_el[2];
4716
4717    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
4718        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
4719        value |= 0x3 << 10;
4720    }
4721    return value;
4722}
4723
4724static const ARMCPRegInfo el2_cp_reginfo[] = {
4725    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
4726      .type = ARM_CP_IO,
4727      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4728      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4729      .writefn = hcr_write },
4730    { .name = "HCR", .state = ARM_CP_STATE_AA32,
4731      .type = ARM_CP_ALIAS | ARM_CP_IO,
4732      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4733      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4734      .writefn = hcr_writelow },
4735    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4736      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4737      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4738    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
4739      .type = ARM_CP_ALIAS,
4740      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
4741      .access = PL2_RW,
4742      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
4743    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4744      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4745      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
4746    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4747      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4748      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
4749    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4750      .type = ARM_CP_ALIAS,
4751      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4752      .access = PL2_RW,
4753      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
4754    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
4755      .type = ARM_CP_ALIAS,
4756      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
4757      .access = PL2_RW,
4758      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
4759    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4760      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4761      .access = PL2_RW, .writefn = vbar_write,
4762      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
4763      .resetvalue = 0 },
4764    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
4765      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
4766      .access = PL3_RW, .type = ARM_CP_ALIAS,
4767      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
4768    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4769      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4770      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
4771      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
4772      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
4773    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4774      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4775      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
4776      .resetvalue = 0 },
4777    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4778      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4779      .access = PL2_RW, .type = ARM_CP_ALIAS,
4780      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
4781    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4782      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4783      .access = PL2_RW, .type = ARM_CP_CONST,
4784      .resetvalue = 0 },
4785    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
4786    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4787      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4788      .access = PL2_RW, .type = ARM_CP_CONST,
4789      .resetvalue = 0 },
4790    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4791      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4792      .access = PL2_RW, .type = ARM_CP_CONST,
4793      .resetvalue = 0 },
4794    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4795      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4796      .access = PL2_RW, .type = ARM_CP_CONST,
4797      .resetvalue = 0 },
4798    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4799      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4800      .access = PL2_RW,
4801      /* no .writefn needed as this can't cause an ASID change;
4802       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
4803       */
4804      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
4805    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
4806      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4807      .type = ARM_CP_ALIAS,
4808      .access = PL2_RW, .accessfn = access_el3_aa32ns,
4809      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4810    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
4811      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4812      .access = PL2_RW,
4813      /* no .writefn needed as this can't cause an ASID change;
4814       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
4815       */
4816      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4817    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4818      .cp = 15, .opc1 = 6, .crm = 2,
4819      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4820      .access = PL2_RW, .accessfn = access_el3_aa32ns,
4821      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
4822      .writefn = vttbr_write },
4823    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4824      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4825      .access = PL2_RW, .writefn = vttbr_write,
4826      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
4827    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4828      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4829      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
4830      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
4831    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4832      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4833      .access = PL2_RW, .resetvalue = 0,
4834      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
4835    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4836      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4837      .access = PL2_RW, .resetvalue = 0,
4838      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4839    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4840      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4841      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4842    { .name = "TLBIALLNSNH",
4843      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4844      .type = ARM_CP_NO_RAW, .access = PL2_W,
4845      .writefn = tlbiall_nsnh_write },
4846    { .name = "TLBIALLNSNHIS",
4847      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4848      .type = ARM_CP_NO_RAW, .access = PL2_W,
4849      .writefn = tlbiall_nsnh_is_write },
4850    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4851      .type = ARM_CP_NO_RAW, .access = PL2_W,
4852      .writefn = tlbiall_hyp_write },
4853    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4854      .type = ARM_CP_NO_RAW, .access = PL2_W,
4855      .writefn = tlbiall_hyp_is_write },
4856    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4857      .type = ARM_CP_NO_RAW, .access = PL2_W,
4858      .writefn = tlbimva_hyp_write },
4859    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4860      .type = ARM_CP_NO_RAW, .access = PL2_W,
4861      .writefn = tlbimva_hyp_is_write },
4862    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
4863      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4864      .type = ARM_CP_NO_RAW, .access = PL2_W,
4865      .writefn = tlbi_aa64_alle2_write },
4866    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
4867      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4868      .type = ARM_CP_NO_RAW, .access = PL2_W,
4869      .writefn = tlbi_aa64_vae2_write },
4870    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
4871      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4872      .access = PL2_W, .type = ARM_CP_NO_RAW,
4873      .writefn = tlbi_aa64_vae2_write },
4874    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
4875      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4876      .access = PL2_W, .type = ARM_CP_NO_RAW,
4877      .writefn = tlbi_aa64_alle2is_write },
4878    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
4879      .opc0 = 1, .