/* ARM generic helpers (target/arm/helper.c). */
   1#include "qemu/osdep.h"
   2#include "target/arm/idau.h"
   3#include "trace.h"
   4#include "cpu.h"
   5#include "internals.h"
   6#include "exec/gdbstub.h"
   7#include "exec/helper-proto.h"
   8#include "qemu/host-utils.h"
   9#include "sysemu/arch_init.h"
  10#include "sysemu/sysemu.h"
  11#include "qemu/bitops.h"
  12#include "qemu/crc32c.h"
  13#include "exec/exec-all.h"
  14#include "exec/cpu_ldst.h"
  15#include "arm_ldst.h"
  16#include <zlib.h> /* For crc32 */
  17#include "exec/semihost.h"
  18#include "sysemu/kvm.h"
  19#include "fpu/softfloat.h"
  20#include "qemu/range.h"
  21
  22#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
  23
  24#ifndef CONFIG_USER_ONLY
  25/* Cacheability and shareability attributes for a memory access */
/* Cacheability and shareability attributes for a memory access.
 * Filled in via the *cacheattrs out-parameter of the get_phys_addr*
 * functions declared below.
 */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;
  30
  31static bool get_phys_addr(CPUARMState *env, target_ulong address,
  32                          MMUAccessType access_type, ARMMMUIdx mmu_idx,
  33                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
  34                          target_ulong *page_size,
  35                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
  36
  37static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
  38                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
  39                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
  40                               target_ulong *page_size_ptr,
  41                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
  42
/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;      /* Non-secure attribute for the address */
    bool nsc;     /* Non-secure-callable attribute */
    uint8_t sregion; /* region number reported by the security lookup */
    bool srvalid;    /* true if sregion is valid */
    uint8_t iregion; /* region number reported by the IDAU, presumably -- see idau.h */
    bool irvalid;    /* true if iregion is valid */
} V8M_SAttributes;
  53
  54static void v8m_security_lookup(CPUARMState *env, uint32_t address,
  55                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
  56                                V8M_SAttributes *sattrs);
  57#endif
  58
  59static void switch_mode(CPUARMState *env, int mode);
  60
  61static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
  62{
  63    int nregs;
  64
  65    /* VFP data registers are always little-endian.  */
  66    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
  67    if (reg < nregs) {
  68        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
  69        return 8;
  70    }
  71    if (arm_feature(env, ARM_FEATURE_NEON)) {
  72        /* Aliases for Q regs.  */
  73        nregs += 16;
  74        if (reg < nregs) {
  75            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
  76            stq_le_p(buf, q[0]);
  77            stq_le_p(buf + 8, q[1]);
  78            return 16;
  79        }
  80    }
  81    switch (reg - nregs) {
  82    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
  83    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
  84    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
  85    }
  86    return 0;
  87}
  88
  89static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
  90{
  91    int nregs;
  92
  93    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
  94    if (reg < nregs) {
  95        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
  96        return 8;
  97    }
  98    if (arm_feature(env, ARM_FEATURE_NEON)) {
  99        nregs += 16;
 100        if (reg < nregs) {
 101            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
 102            q[0] = ldq_le_p(buf);
 103            q[1] = ldq_le_p(buf + 8);
 104            return 16;
 105        }
 106    }
 107    switch (reg - nregs) {
 108    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
 109    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
 110    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
 111    }
 112    return 0;
 113}
 114
 115static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
 116{
 117    switch (reg) {
 118    case 0 ... 31:
 119        /* 128 bit FP register */
 120        {
 121            uint64_t *q = aa64_vfp_qreg(env, reg);
 122            stq_le_p(buf, q[0]);
 123            stq_le_p(buf + 8, q[1]);
 124            return 16;
 125        }
 126    case 32:
 127        /* FPSR */
 128        stl_p(buf, vfp_get_fpsr(env));
 129        return 4;
 130    case 33:
 131        /* FPCR */
 132        stl_p(buf, vfp_get_fpcr(env));
 133        return 4;
 134    default:
 135        return 0;
 136    }
 137}
 138
 139static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
 140{
 141    switch (reg) {
 142    case 0 ... 31:
 143        /* 128 bit FP register */
 144        {
 145            uint64_t *q = aa64_vfp_qreg(env, reg);
 146            q[0] = ldq_le_p(buf);
 147            q[1] = ldq_le_p(buf + 8);
 148            return 16;
 149        }
 150    case 32:
 151        /* FPSR */
 152        vfp_set_fpsr(env, ldl_p(buf));
 153        return 4;
 154    case 33:
 155        /* FPCR */
 156        vfp_set_fpcr(env, ldl_p(buf));
 157        return 4;
 158    default:
 159        return 0;
 160    }
 161}
 162
 163static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
 164{
 165    assert(ri->fieldoffset);
 166    if (cpreg_field_is_64bit(ri)) {
 167        return CPREG_FIELD64(env, ri);
 168    } else {
 169        return CPREG_FIELD32(env, ri);
 170    }
 171}
 172
 173static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
 174                      uint64_t value)
 175{
 176    assert(ri->fieldoffset);
 177    if (cpreg_field_is_64bit(ri)) {
 178        CPREG_FIELD64(env, ri) = value;
 179    } else {
 180        CPREG_FIELD32(env, ri) = value;
 181    }
 182}
 183
 184static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
 185{
 186    return (char *)env + ri->fieldoffset;
 187}
 188
 189uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
 190{
 191    /* Raw read of a coprocessor register (as needed for migration, etc). */
 192    if (ri->type & ARM_CP_CONST) {
 193        return ri->resetvalue;
 194    } else if (ri->raw_readfn) {
 195        return ri->raw_readfn(env, ri);
 196    } else if (ri->readfn) {
 197        return ri->readfn(env, ri);
 198    } else {
 199        return raw_read(env, ri);
 200    }
 201}
 202
 203static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
 204                             uint64_t v)
 205{
 206    /* Raw write of a coprocessor register (as needed for migration, etc).
 207     * Note that constant registers are treated as write-ignored; the
 208     * caller should check for success by whether a readback gives the
 209     * value written.
 210     */
 211    if (ri->type & ARM_CP_CONST) {
 212        return;
 213    } else if (ri->raw_writefn) {
 214        ri->raw_writefn(env, ri, v);
 215    } else if (ri->writefn) {
 216        ri->writefn(env, ri, v);
 217    } else {
 218        raw_write(env, ri, v);
 219    }
 220}
 221
 222static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
 223{
 224    ARMCPU *cpu = arm_env_get_cpu(env);
 225    const ARMCPRegInfo *ri;
 226    uint32_t key;
 227
 228    key = cpu->dyn_xml.cpregs_keys[reg];
 229    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
 230    if (ri) {
 231        if (cpreg_field_is_64bit(ri)) {
 232            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
 233        } else {
 234            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
 235        }
 236    }
 237    return 0;
 238}
 239
static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
{
    /* Writes to system registers via the gdbstub are not supported;
     * returning 0 reports the register as not written.
     */
    return 0;
}
 244
 245static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
 246{
 247   /* Return true if the regdef would cause an assertion if you called
 248    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
 249    * program bug for it not to have the NO_RAW flag).
 250    * NB that returning false here doesn't necessarily mean that calling
 251    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
 252    * read/write access functions which are safe for raw use" from "has
 253    * read/write access functions which have side effects but has forgotten
 254    * to provide raw access functions".
 255    * The tests here line up with the conditions in read/write_raw_cp_reg()
 256    * and assertions in raw_read()/raw_write().
 257    */
 258    if ((ri->type & ARM_CP_CONST) ||
 259        ri->fieldoffset ||
 260        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
 261        return false;
 262    }
 263    return true;
 264}
 265
 266bool write_cpustate_to_list(ARMCPU *cpu)
 267{
 268    /* Write the coprocessor state from cpu->env to the (index,value) list. */
 269    int i;
 270    bool ok = true;
 271
 272    for (i = 0; i < cpu->cpreg_array_len; i++) {
 273        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
 274        const ARMCPRegInfo *ri;
 275
 276        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 277        if (!ri) {
 278            ok = false;
 279            continue;
 280        }
 281        if (ri->type & ARM_CP_NO_RAW) {
 282            continue;
 283        }
 284        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
 285    }
 286    return ok;
 287}
 288
 289bool write_list_to_cpustate(ARMCPU *cpu)
 290{
 291    int i;
 292    bool ok = true;
 293
 294    for (i = 0; i < cpu->cpreg_array_len; i++) {
 295        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
 296        uint64_t v = cpu->cpreg_values[i];
 297        const ARMCPRegInfo *ri;
 298
 299        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 300        if (!ri) {
 301            ok = false;
 302            continue;
 303        }
 304        if (ri->type & ARM_CP_NO_RAW) {
 305            continue;
 306        }
 307        /* Write value and confirm it reads back as written
 308         * (to catch read-only registers and partially read-only
 309         * registers where the incoming migration value doesn't match)
 310         */
 311        write_raw_cp_reg(&cpu->env, ri, v);
 312        if (read_raw_cp_reg(&cpu->env, ri) != v) {
 313            ok = false;
 314        }
 315    }
 316    return ok;
 317}
 318
 319static void add_cpreg_to_list(gpointer key, gpointer opaque)
 320{
 321    ARMCPU *cpu = opaque;
 322    uint64_t regidx;
 323    const ARMCPRegInfo *ri;
 324
 325    regidx = *(uint32_t *)key;
 326    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 327
 328    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
 329        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
 330        /* The value array need not be initialized at this point */
 331        cpu->cpreg_array_len++;
 332    }
 333}
 334
 335static void count_cpreg(gpointer key, gpointer opaque)
 336{
 337    ARMCPU *cpu = opaque;
 338    uint64_t regidx;
 339    const ARMCPRegInfo *ri;
 340
 341    regidx = *(uint32_t *)key;
 342    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 343
 344    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
 345        cpu->cpreg_array_len++;
 346    }
 347}
 348
 349static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
 350{
 351    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
 352    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
 353
 354    if (aidx > bidx) {
 355        return 1;
 356    }
 357    if (aidx < bidx) {
 358        return -1;
 359    }
 360    return 0;
 361}
 362
 363void init_cpreg_list(ARMCPU *cpu)
 364{
 365    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
 366     * Note that we require cpreg_tuples[] to be sorted by key ID.
 367     */
 368    GList *keys;
 369    int arraylen;
 370
 371    keys = g_hash_table_get_keys(cpu->cp_regs);
 372    keys = g_list_sort(keys, cpreg_key_compare);
 373
 374    cpu->cpreg_array_len = 0;
 375
 376    g_list_foreach(keys, count_cpreg, cpu);
 377
 378    arraylen = cpu->cpreg_array_len;
 379    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
 380    cpu->cpreg_values = g_new(uint64_t, arraylen);
 381    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
 382    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
 383    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
 384    cpu->cpreg_array_len = 0;
 385
 386    g_list_foreach(keys, add_cpreg_to_list, cpu);
 387
 388    assert(cpu->cpreg_array_len == arraylen);
 389
 390    g_list_free(keys);
 391}
 392
 393/*
 394 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 395 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 396 *
 397 * access_el3_aa32ns: Used to check AArch32 register views.
 398 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 399 */
 400static CPAccessResult access_el3_aa32ns(CPUARMState *env,
 401                                        const ARMCPRegInfo *ri,
 402                                        bool isread)
 403{
 404    bool secure = arm_is_secure_below_el3(env);
 405
 406    assert(!arm_el_is_aa64(env, 3));
 407    if (secure) {
 408        return CP_ACCESS_TRAP_UNCATEGORIZED;
 409    }
 410    return CP_ACCESS_OK;
 411}
 412
 413static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
 414                                                const ARMCPRegInfo *ri,
 415                                                bool isread)
 416{
 417    if (!arm_el_is_aa64(env, 3)) {
 418        return access_el3_aa32ns(env, ri, isread);
 419    }
 420    return CP_ACCESS_OK;
 421}
 422
 423/* Some secure-only AArch32 registers trap to EL3 if used from
 424 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 425 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 426 * We assume that the .access field is set to PL1_RW.
 427 */
 428static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
 429                                            const ARMCPRegInfo *ri,
 430                                            bool isread)
 431{
 432    if (arm_current_el(env) == 3) {
 433        return CP_ACCESS_OK;
 434    }
 435    if (arm_is_secure_below_el3(env)) {
 436        return CP_ACCESS_TRAP_EL3;
 437    }
 438    /* This will be EL1 NS and EL2 NS, which just UNDEF */
 439    return CP_ACCESS_TRAP_UNCATEGORIZED;
 440}
 441
 442/* Check for traps to "powerdown debug" registers, which are controlled
 443 * by MDCR.TDOSA
 444 */
 445static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
 446                                   bool isread)
 447{
 448    int el = arm_current_el(env);
 449    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
 450        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 451        (env->cp15.hcr_el2 & HCR_TGE);
 452
 453    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
 454        return CP_ACCESS_TRAP_EL2;
 455    }
 456    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
 457        return CP_ACCESS_TRAP_EL3;
 458    }
 459    return CP_ACCESS_OK;
 460}
 461
 462/* Check for traps to "debug ROM" registers, which are controlled
 463 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 464 */
 465static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
 466                                  bool isread)
 467{
 468    int el = arm_current_el(env);
 469    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
 470        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 471        (env->cp15.hcr_el2 & HCR_TGE);
 472
 473    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
 474        return CP_ACCESS_TRAP_EL2;
 475    }
 476    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
 477        return CP_ACCESS_TRAP_EL3;
 478    }
 479    return CP_ACCESS_OK;
 480}
 481
 482/* Check for traps to general debug registers, which are controlled
 483 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 484 */
 485static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
 486                                  bool isread)
 487{
 488    int el = arm_current_el(env);
 489    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
 490        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 491        (env->cp15.hcr_el2 & HCR_TGE);
 492
 493    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
 494        return CP_ACCESS_TRAP_EL2;
 495    }
 496    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
 497        return CP_ACCESS_TRAP_EL3;
 498    }
 499    return CP_ACCESS_OK;
 500}
 501
 502/* Check for traps to performance monitor registers, which are controlled
 503 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 504 */
 505static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
 506                                 bool isread)
 507{
 508    int el = arm_current_el(env);
 509
 510    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
 511        && !arm_is_secure_below_el3(env)) {
 512        return CP_ACCESS_TRAP_EL2;
 513    }
 514    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
 515        return CP_ACCESS_TRAP_EL3;
 516    }
 517    return CP_ACCESS_OK;
 518}
 519
 520static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 521{
 522    ARMCPU *cpu = arm_env_get_cpu(env);
 523
 524    raw_write(env, ri, value);
 525    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
 526}
 527
 528static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 529{
 530    ARMCPU *cpu = arm_env_get_cpu(env);
 531
 532    if (raw_read(env, ri) != value) {
 533        /* Unlike real hardware the qemu TLB uses virtual addresses,
 534         * not modified virtual addresses, so this causes a TLB flush.
 535         */
 536        tlb_flush(CPU(cpu));
 537        raw_write(env, ri, value);
 538    }
 539}
 540
 541static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 542                             uint64_t value)
 543{
 544    ARMCPU *cpu = arm_env_get_cpu(env);
 545
 546    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
 547        && !extended_addresses_enabled(env)) {
 548        /* For VMSA (when not using the LPAE long descriptor page table
 549         * format) this register includes the ASID, so do a TLB flush.
 550         * For PMSA it is purely a process ID and no action is needed.
 551         */
 552        tlb_flush(CPU(cpu));
 553    }
 554    raw_write(env, ri, value);
 555}
 556
 557/* IS variants of TLB operations must affect all cores */
 558static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 559                             uint64_t value)
 560{
 561    CPUState *cs = ENV_GET_CPU(env);
 562
 563    tlb_flush_all_cpus_synced(cs);
 564}
 565
 566static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 567                             uint64_t value)
 568{
 569    CPUState *cs = ENV_GET_CPU(env);
 570
 571    tlb_flush_all_cpus_synced(cs);
 572}
 573
 574static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 575                             uint64_t value)
 576{
 577    CPUState *cs = ENV_GET_CPU(env);
 578
 579    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 580}
 581
 582static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 583                             uint64_t value)
 584{
 585    CPUState *cs = ENV_GET_CPU(env);
 586
 587    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 588}
 589
 590/*
 591 * Non-IS variants of TLB operations are upgraded to
 592 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 593 * force broadcast of these operations.
 594 */
 595static bool tlb_force_broadcast(CPUARMState *env)
 596{
 597    return (env->cp15.hcr_el2 & HCR_FB) &&
 598        arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
 599}
 600
 601static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
 602                          uint64_t value)
 603{
 604    /* Invalidate all (TLBIALL) */
 605    ARMCPU *cpu = arm_env_get_cpu(env);
 606
 607    if (tlb_force_broadcast(env)) {
 608        tlbiall_is_write(env, NULL, value);
 609        return;
 610    }
 611
 612    tlb_flush(CPU(cpu));
 613}
 614
 615static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
 616                          uint64_t value)
 617{
 618    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
 619    ARMCPU *cpu = arm_env_get_cpu(env);
 620
 621    if (tlb_force_broadcast(env)) {
 622        tlbimva_is_write(env, NULL, value);
 623        return;
 624    }
 625
 626    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
 627}
 628
 629static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
 630                           uint64_t value)
 631{
 632    /* Invalidate by ASID (TLBIASID) */
 633    ARMCPU *cpu = arm_env_get_cpu(env);
 634
 635    if (tlb_force_broadcast(env)) {
 636        tlbiasid_is_write(env, NULL, value);
 637        return;
 638    }
 639
 640    tlb_flush(CPU(cpu));
 641}
 642
 643static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
 644                           uint64_t value)
 645{
 646    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
 647    ARMCPU *cpu = arm_env_get_cpu(env);
 648
 649    if (tlb_force_broadcast(env)) {
 650        tlbimvaa_is_write(env, NULL, value);
 651        return;
 652    }
 653
 654    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
 655}
 656
 657static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
 658                               uint64_t value)
 659{
 660    CPUState *cs = ENV_GET_CPU(env);
 661
 662    tlb_flush_by_mmuidx(cs,
 663                        ARMMMUIdxBit_S12NSE1 |
 664                        ARMMMUIdxBit_S12NSE0 |
 665                        ARMMMUIdxBit_S2NS);
 666}
 667
 668static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 669                                  uint64_t value)
 670{
 671    CPUState *cs = ENV_GET_CPU(env);
 672
 673    tlb_flush_by_mmuidx_all_cpus_synced(cs,
 674                                        ARMMMUIdxBit_S12NSE1 |
 675                                        ARMMMUIdxBit_S12NSE0 |
 676                                        ARMMMUIdxBit_S2NS);
 677}
 678
 679static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
 680                            uint64_t value)
 681{
 682    /* Invalidate by IPA. This has to invalidate any structures that
 683     * contain only stage 2 translation information, but does not need
 684     * to apply to structures that contain combined stage 1 and stage 2
 685     * translation information.
 686     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
 687     */
 688    CPUState *cs = ENV_GET_CPU(env);
 689    uint64_t pageaddr;
 690
 691    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
 692        return;
 693    }
 694
 695    pageaddr = sextract64(value << 12, 0, 40);
 696
 697    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
 698}
 699
 700static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 701                               uint64_t value)
 702{
 703    CPUState *cs = ENV_GET_CPU(env);
 704    uint64_t pageaddr;
 705
 706    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
 707        return;
 708    }
 709
 710    pageaddr = sextract64(value << 12, 0, 40);
 711
 712    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
 713                                             ARMMMUIdxBit_S2NS);
 714}
 715
 716static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
 717                              uint64_t value)
 718{
 719    CPUState *cs = ENV_GET_CPU(env);
 720
 721    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
 722}
 723
 724static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 725                                 uint64_t value)
 726{
 727    CPUState *cs = ENV_GET_CPU(env);
 728
 729    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
 730}
 731
 732static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
 733                              uint64_t value)
 734{
 735    CPUState *cs = ENV_GET_CPU(env);
 736    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
 737
 738    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
 739}
 740
 741static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 742                                 uint64_t value)
 743{
 744    CPUState *cs = ENV_GET_CPU(env);
 745    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
 746
 747    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
 748                                             ARMMMUIdxBit_S1E2);
 749}
 750
static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    /* fcse_write flushes the TLB on change, since QEMU's TLB is indexed
     * by VA rather than modified VA.
     */
    { .name = "FCSEIDR",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR_S",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};
 785
static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
 814
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
 823
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    /* Memory remap registers: NOP'd here (no TEX remap implemented).  */
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
 872
 873static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 874                        uint64_t value)
 875{
 876    uint32_t mask = 0;
 877
 878    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
 879    if (!arm_feature(env, ARM_FEATURE_V8)) {
 880        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
 881         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
 882         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
 883         */
 884        if (arm_feature(env, ARM_FEATURE_VFP)) {
 885            /* VFP coprocessor: cp10 & cp11 [23:20] */
 886            mask |= (1 << 31) | (1 << 30) | (0xf << 20);
 887
 888            if (!arm_feature(env, ARM_FEATURE_NEON)) {
 889                /* ASEDIS [31] bit is RAO/WI */
 890                value |= (1 << 31);
 891            }
 892
 893            /* VFPv3 and upwards with NEON implement 32 double precision
 894             * registers (D0-D31).
 895             */
 896            if (!arm_feature(env, ARM_FEATURE_NEON) ||
 897                    !arm_feature(env, ARM_FEATURE_VFP3)) {
 898                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
 899                value |= (1 << 30);
 900            }
 901        }
 902        value &= mask;
 903    }
 904    env->cp15.cpacr_el1 = value;
 905}
 906
 907static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
 908{
 909    /* Call cpacr_write() so that we reset with the correct RAO bits set
 910     * for our CPU features.
 911     */
 912    cpacr_write(env, ri, 0);
 913}
 914
 915static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
 916                                   bool isread)
 917{
 918    if (arm_feature(env, ARM_FEATURE_V8)) {
 919        /* Check if CPACR accesses are to be trapped to EL2 */
 920        if (arm_current_el(env) == 1 &&
 921            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
 922            return CP_ACCESS_TRAP_EL2;
 923        /* Check if CPACR accesses are to be trapped to EL3 */
 924        } else if (arm_current_el(env) < 3 &&
 925                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
 926            return CP_ACCESS_TRAP_EL3;
 927        }
 928    }
 929
 930    return CP_ACCESS_OK;
 931}
 932
 933static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
 934                                  bool isread)
 935{
 936    /* Check if CPTR accesses are set to trap to EL3 */
 937    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
 938        return CP_ACCESS_TRAP_EL3;
 939    }
 940
 941    return CP_ACCESS_OK;
 942}
 943
/* cp15 registers introduced in ARMv6 */
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    /* Instruction Fault Address Register, banked secure/non-secure */
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    /* The RAZ/WI and RAO/WI behaviour of CPACR bits is handled by
     * cpacr_write(); reset goes through cpacr_reset() for the same reason.
     */
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetfn = cpacr_reset, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
 975
/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800      /* PMCR.N [15:11]: number of event counters */
#define PMCRN_SHIFT 11
#define PMCRD   0x8             /* PMCR.D: clock divider, count every 64 cycles */
#define PMCRC   0x4             /* PMCR.C: cycle counter reset */
#define PMCRE   0x1             /* PMCR.E: enable all counters */
 982
 983static inline uint32_t pmu_num_counters(CPUARMState *env)
 984{
 985  return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
 986}
 987
 988/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
 989static inline uint64_t pmu_counter_mask(CPUARMState *env)
 990{
 991  return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
 992}
 993
 994static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
 995                                   bool isread)
 996{
 997    /* Performance monitor registers user accessibility is controlled
 998     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
 999     * trapping to EL2 or EL3 for other accesses.
1000     */
1001    int el = arm_current_el(env);
1002
1003    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1004        return CP_ACCESS_TRAP;
1005    }
1006    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1007        && !arm_is_secure_below_el3(env)) {
1008        return CP_ACCESS_TRAP_EL2;
1009    }
1010    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1011        return CP_ACCESS_TRAP_EL3;
1012    }
1013
1014    return CP_ACCESS_OK;
1015}
1016
1017static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1018                                           const ARMCPRegInfo *ri,
1019                                           bool isread)
1020{
1021    /* ER: event counter read trap control */
1022    if (arm_feature(env, ARM_FEATURE_V8)
1023        && arm_current_el(env) == 0
1024        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1025        && isread) {
1026        return CP_ACCESS_OK;
1027    }
1028
1029    return pmreg_access(env, ri, isread);
1030}
1031
1032static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1033                                         const ARMCPRegInfo *ri,
1034                                         bool isread)
1035{
1036    /* SW: software increment write trap control */
1037    if (arm_feature(env, ARM_FEATURE_V8)
1038        && arm_current_el(env) == 0
1039        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1040        && !isread) {
1041        return CP_ACCESS_OK;
1042    }
1043
1044    return pmreg_access(env, ri, isread);
1045}
1046
1047#ifndef CONFIG_USER_ONLY
1048
1049static CPAccessResult pmreg_access_selr(CPUARMState *env,
1050                                        const ARMCPRegInfo *ri,
1051                                        bool isread)
1052{
1053    /* ER: event counter read trap control */
1054    if (arm_feature(env, ARM_FEATURE_V8)
1055        && arm_current_el(env) == 0
1056        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1057        return CP_ACCESS_OK;
1058    }
1059
1060    return pmreg_access(env, ri, isread);
1061}
1062
1063static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1064                                         const ARMCPRegInfo *ri,
1065                                         bool isread)
1066{
1067    /* CR: cycle counter read trap control */
1068    if (arm_feature(env, ARM_FEATURE_V8)
1069        && arm_current_el(env) == 0
1070        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1071        && isread) {
1072        return CP_ACCESS_OK;
1073    }
1074
1075    return pmreg_access(env, ri, isread);
1076}
1077
1078static inline bool arm_ccnt_enabled(CPUARMState *env)
1079{
1080    /* This does not support checking PMCCFILTR_EL0 register */
1081
1082    if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
1083        return false;
1084    }
1085
1086    return true;
1087}
1088
/* Toggle the representation of the cycle counter. While the counter is
 * enabled, c15_ccnt holds (ticks-now - guest-visible value); while it is
 * stopped, c15_ccnt holds the guest-visible value directly. The transform
 * is its own inverse, so calling this before and after a change to the
 * PMU configuration keeps the guest-visible value consistent.
 */
void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    /* Ticks of the emulated fixed-frequency CPU clock (ARM_CPU_FREQ) */
    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
1105
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Sync before and after the update so a change to PMCR.D or PMCR.E
     * does not disturb the guest-visible cycle counter value.
     */
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}
1122
/* Read the guest-visible cycle counter value (see pmccntr_sync() for the
 * two representations of c15_ccnt).
 */
static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    /* While enabled, c15_ccnt holds (ticks at last sync - counter value),
     * so the current counter value is current ticks minus that delta.
     */
    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}
1141
1142static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1143                         uint64_t value)
1144{
1145    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1146     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1147     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1148     * accessed.
1149     */
1150    env->cp15.c9_pmselr = value & 0x1f;
1151}
1152
/* Write the guest-visible cycle counter (see pmccntr_sync() for the two
 * representations of c15_ccnt).
 */
static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    /* While enabled, store (current ticks - requested value) so later
     * reads return a counter advancing from 'value'.
     */
    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}
1173
1174static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1175                            uint64_t value)
1176{
1177    uint64_t cur_val = pmccntr_read(env, NULL);
1178
1179    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1180}
1181
1182#else /* CONFIG_USER_ONLY */
1183
/* User-mode emulation has no cycle counter state to synchronize */
void pmccntr_sync(CPUARMState *env)
{
}
1187
1188#endif
1189
static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Only bits [31:26] of PMCCFILTR_EL0 are writable (0xfc000000).
     * Sync the cycle counter around the update so the filtering change
     * does not perturb the guest-visible counter value.
     */
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0xfc000000;
    pmccntr_sync(env);
}
1197
1198static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1199                            uint64_t value)
1200{
1201    value &= pmu_counter_mask(env);
1202    env->cp15.c9_pmcnten |= value;
1203}
1204
1205static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1206                             uint64_t value)
1207{
1208    value &= pmu_counter_mask(env);
1209    env->cp15.c9_pmcnten &= ~value;
1210}
1211
1212static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1213                         uint64_t value)
1214{
1215    value &= pmu_counter_mask(env);
1216    env->cp15.c9_pmovsr &= ~value;
1217}
1218
1219static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1220                             uint64_t value)
1221{
1222    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1223     * PMSELR value is equal to or greater than the number of implemented
1224     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1225     */
1226    if (env->cp15.c9_pmselr == 0x1f) {
1227        pmccfiltr_write(env, ri, value);
1228    }
1229}
1230
1231static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1232{
1233    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1234     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
1235     */
1236    if (env->cp15.c9_pmselr == 0x1f) {
1237        return env->cp15.pmccfiltr_el0;
1238    } else {
1239        return 0;
1240    }
1241}
1242
1243static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1244                            uint64_t value)
1245{
1246    if (arm_feature(env, ARM_FEATURE_V8)) {
1247        env->cp15.c9_pmuserenr = value & 0xf;
1248    } else {
1249        env->cp15.c9_pmuserenr = value & 1;
1250    }
1251}
1252
static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Set-bits write: only the cycle counter bit (31) and the bits for
     * the implemented event counters (see pmu_counter_mask()) can be set.
     */
    value &= pmu_counter_mask(env);
    env->cp15.c9_pminten |= value;
}
1260
1261static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1262                             uint64_t value)
1263{
1264    value &= pmu_counter_mask(env);
1265    env->cp15.c9_pminten &= ~value;
1266}
1267
1268static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1269                       uint64_t value)
1270{
1271    /* Note that even though the AArch64 view of this register has bits
1272     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1273     * architectural requirements for bits which are RES0 only in some
1274     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1275     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1276     */
1277    raw_write(env, ri, value & ~0x1FULL);
1278}
1279
/* Write SCR/SCR_EL3, clearing bits that are RES0 for the current
 * configuration before storing.
 */
static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* HCE (Hyp Call Enable) is meaningless without EL2 */
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}
1307
1308static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1309{
1310    ARMCPU *cpu = arm_env_get_cpu(env);
1311
1312    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
1313     * bank
1314     */
1315    uint32_t index = A32_BANKED_REG_GET(env, csselr,
1316                                        ri->secure & ARM_CP_SECSTATE_S);
1317
1318    return cpu->ccsidr[index];
1319}
1320
1321static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1322                         uint64_t value)
1323{
1324    raw_write(env, ri, value & 0xf);
1325}
1326
/* Read ISR/ISR_EL1: report pending interrupt state. When the HCR_EL2
 * IMO/FMO routing controls are in effect, the virtual interrupt lines
 * (VIRQ/VFIQ) are reported instead of the physical ones.
 */
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (arm_hcr_el2_imo(env)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
            ret |= CPSR_I;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
            ret |= CPSR_I;
        }
    }

    if (arm_hcr_el2_fmo(env)) {
        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
            ret |= CPSR_F;
        }
    } else {
        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
            ret |= CPSR_F;
        }
    }

    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}
1355
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    /* PMSELR selects the counter accessed via PMXEVTYPER/PMXEVCNTR */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    /* Cache ID registers: CSSELR selects which CCSIDR entry is visible */
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
     /* MAIR0/1 are defined separately from their 64-bit counterpart which
      * allows them to assign the correct fieldoffset based on the endianness
      * handled in the field definitions.
      */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};
1563
/* TLB invalidate operations with Inner Shareable scope (v7
 * multiprocessing extensions); these route to the *_is_write helpers.
 */
static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};
1578
1579static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1580                        uint64_t value)
1581{
1582    value &= 1;
1583    env->teecr = value;
1584}
1585
1586static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1587                                    bool isread)
1588{
1589    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1590        return CP_ACCESS_TRAP;
1591    }
1592    return CP_ACCESS_OK;
1593}
1594
/* ThumbEE registers: TEECR gates PL0 access to TEEHBR via
 * teehbr_access().
 */
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
1605
/* Software thread ID registers (v6K and their AArch64 views). The
 * AArch32 variants are banked between Secure and Non-secure state.
 */
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    /* EL0 read/write thread ID register */
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
      .resetfn = arm_cp_reset_ignore },
    /* EL0 read-only (PL1 writable) thread ID register */
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
      .resetvalue = 0},
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
      .resetfn = arm_cp_reset_ignore },
    /* PL1-only thread ID register */
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
1637
1638#ifndef CONFIG_USER_ONLY
1639
1640static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
1641                                       bool isread)
1642{
1643    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1644     * Writable only at the highest implemented exception level.
1645     */
1646    int el = arm_current_el(env);
1647
1648    switch (el) {
1649    case 0:
1650        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
1651            return CP_ACCESS_TRAP;
1652        }
1653        break;
1654    case 1:
1655        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
1656            arm_is_secure_below_el3(env)) {
1657            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1658            return CP_ACCESS_TRAP_UNCATEGORIZED;
1659        }
1660        break;
1661    case 2:
1662    case 3:
1663        break;
1664    }
1665
1666    if (!isread && el < arm_highest_el(env)) {
1667        return CP_ACCESS_TRAP_UNCATEGORIZED;
1668    }
1669
1670    return CP_ACCESS_OK;
1671}
1672
/* Check permission to access the physical/virtual system counter
 * (CNTPCT/CNTVCT) from the current exception level.
 */
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
                                        bool isread)
{
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);

    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (cur_el == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }

    /* With EL2: NS EL0/EL1 physical-counter accesses trap to EL2 when
     * CNTHCTL_EL2 bit 0 (physical counter enable for NS EL1/EL0) is clear.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
        return CP_ACCESS_TRAP_EL2;
    }
    return CP_ACCESS_OK;
}
1692
1693static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1694                                      bool isread)
1695{
1696    unsigned int cur_el = arm_current_el(env);
1697    bool secure = arm_is_secure(env);
1698
1699    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1700     * EL0[PV]TEN is zero.
1701     */
1702    if (cur_el == 0 &&
1703        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1704        return CP_ACCESS_TRAP;
1705    }
1706
1707    if (arm_feature(env, ARM_FEATURE_EL2) &&
1708        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1709        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
1710        return CP_ACCESS_TRAP_EL2;
1711    }
1712    return CP_ACCESS_OK;
1713}
1714
1715static CPAccessResult gt_pct_access(CPUARMState *env,
1716                                    const ARMCPRegInfo *ri,
1717                                    bool isread)
1718{
1719    return gt_counter_access(env, GTIMER_PHYS, isread);
1720}
1721
1722static CPAccessResult gt_vct_access(CPUARMState *env,
1723                                    const ARMCPRegInfo *ri,
1724                                    bool isread)
1725{
1726    return gt_counter_access(env, GTIMER_VIRT, isread);
1727}
1728
1729static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1730                                       bool isread)
1731{
1732    return gt_timer_access(env, GTIMER_PHYS, isread);
1733}
1734
1735static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1736                                       bool isread)
1737{
1738    return gt_timer_access(env, GTIMER_VIRT, isread);
1739}
1740
1741static CPAccessResult gt_stimer_access(CPUARMState *env,
1742                                       const ARMCPRegInfo *ri,
1743                                       bool isread)
1744{
1745    /* The AArch64 register view of the secure physical timer is
1746     * always accessible from EL3, and configurably accessible from
1747     * Secure EL1.
1748     */
1749    switch (arm_current_el(env)) {
1750    case 1:
1751        if (!arm_is_secure(env)) {
1752            return CP_ACCESS_TRAP;
1753        }
1754        if (!(env->cp15.scr_el3 & SCR_ST)) {
1755            return CP_ACCESS_TRAP_EL3;
1756        }
1757        return CP_ACCESS_OK;
1758    case 0:
1759    case 2:
1760        return CP_ACCESS_TRAP;
1761    case 3:
1762        return CP_ACCESS_OK;
1763    default:
1764        g_assert_not_reached();
1765    }
1766}
1767
1768static uint64_t gt_get_countervalue(CPUARMState *env)
1769{
1770    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1771}
1772
1773static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1774{
1775    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1776
1777    if (gt->ctl & 1) {
1778        /* Timer enabled: calculate and set current ISTATUS, irq, and
1779         * reset timer to when ISTATUS next has to change
1780         */
1781        uint64_t offset = timeridx == GTIMER_VIRT ?
1782                                      cpu->env.cp15.cntvoff_el2 : 0;
1783        uint64_t count = gt_get_countervalue(&cpu->env);
1784        /* Note that this must be unsigned 64 bit arithmetic: */
1785        int istatus = count - offset >= gt->cval;
1786        uint64_t nexttick;
1787        int irqstate;
1788
1789        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1790
1791        irqstate = (istatus && !(gt->ctl & 2));
1792        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1793
1794        if (istatus) {
1795            /* Next transition is when count rolls back over to zero */
1796            nexttick = UINT64_MAX;
1797        } else {
1798            /* Next transition is when we hit cval */
1799            nexttick = gt->cval + offset;
1800        }
1801        /* Note that the desired next expiry time might be beyond the
1802         * signed-64-bit range of a QEMUTimer -- in this case we just
1803         * set the timer for as far in the future as possible. When the
1804         * timer expires we will reset the timer for any remaining period.
1805         */
1806        if (nexttick > INT64_MAX / GTIMER_SCALE) {
1807            nexttick = INT64_MAX / GTIMER_SCALE;
1808        }
1809        timer_mod(cpu->gt_timer[timeridx], nexttick);
1810        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
1811    } else {
1812        /* Timer disabled: ISTATUS and timer output always clear */
1813        gt->ctl &= ~4;
1814        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
1815        timer_del(cpu->gt_timer[timeridx]);
1816        trace_arm_gt_recalc_disabled(timeridx);
1817    }
1818}
1819
1820static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1821                           int timeridx)
1822{
1823    ARMCPU *cpu = arm_env_get_cpu(env);
1824
1825    timer_del(cpu->gt_timer[timeridx]);
1826}
1827
1828static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1829{
1830    return gt_get_countervalue(env);
1831}
1832
1833static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1834{
1835    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
1836}
1837
1838static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1839                          int timeridx,
1840                          uint64_t value)
1841{
1842    trace_arm_gt_cval_write(timeridx, value);
1843    env->cp15.c14_timer[timeridx].cval = value;
1844    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1845}
1846
1847static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1848                             int timeridx)
1849{
1850    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1851
1852    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1853                      (gt_get_countervalue(env) - offset));
1854}
1855
1856static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1857                          int timeridx,
1858                          uint64_t value)
1859{
1860    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1861
1862    trace_arm_gt_tval_write(timeridx, value);
1863    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1864                                         sextract64(value, 0, 32);
1865    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1866}
1867
/* CTL write: only bits [1:0] (ENABLE and IMASK) are written; ISTATUS
 * (bit 2) is maintained by gt_recalc_timer().
 */
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         int timeridx,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    trace_arm_gt_ctl_write(timeridx, value);
    /* Deposit only ENABLE/IMASK; preserve the remaining bits */
    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        int irqstate = (oldval & 4) && !(value & 2);

        trace_arm_gt_imask_toggle(timeridx, irqstate);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
    }
}
1890
/* Per-register hooks for the EL1 physical timer (GTIMER_PHYS). */
static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_PHYS);
}

static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_PHYS, value);
}

static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_PHYS);
}

static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_PHYS, value);
}

static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_PHYS, value);
}
1918
/* Per-register hooks for the virtual timer (GTIMER_VIRT). */
static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_VIRT);
}

static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_VIRT, value);
}

static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_VIRT);
}

static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_VIRT, value);
}

static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_VIRT, value);
}
1946
1947static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
1948                              uint64_t value)
1949{
1950    ARMCPU *cpu = arm_env_get_cpu(env);
1951
1952    trace_arm_gt_cntvoff_write(value);
1953    raw_write(env, ri, value);
1954    gt_recalc_timer(cpu, GTIMER_VIRT);
1955}
1956
/* Per-register hooks for the EL2 (Hyp) timer (GTIMER_HYP). */
static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_HYP);
}

static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_HYP, value);
}

static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_HYP);
}

static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_HYP, value);
}

static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_HYP, value);
}
1984
/* Per-register hooks for the secure physical timer (GTIMER_SEC). */
static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    gt_timer_reset(env, ri, GTIMER_SEC);
}

static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_cval_write(env, ri, GTIMER_SEC, value);
}

static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_tval_read(env, ri, GTIMER_SEC);
}

static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_tval_write(env, ri, GTIMER_SEC, value);
}

static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    gt_ctl_write(env, ri, GTIMER_SEC, value);
}
2012
/* QEMUTimer expiry callbacks: re-run the recalculation for the timer
 * that fired, which updates ISTATUS/IRQ state and reprograms the
 * QEMUTimer for any remaining period.
 */
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

void arm_gt_htimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_HYP);
}

void arm_gt_stimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_SEC);
}
2040
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    /* 32-bit view aliases the state of the AArch64 CNTFRQ_EL0 below */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    /* Secure-state view of CNTP_CTL, backed by the GTIMER_SEC timer */
    { .name = "CNTP_CTL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_SEC].ctl),
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    /* NO_RAW because the value is computed from CVAL and the counter */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_NS,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    /* Secure-state view of CNTP_TVAL, backed by GTIMER_SEC */
    { .name = "CNTP_TVAL_S",
      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .secure = ARM_CP_SECSTATE_S,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_NS,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    /* Secure-state view of CNTP_CVAL, backed by GTIMER_SEC */
    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
      .secure = ARM_CP_SECSTATE_S,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .accessfn = gt_ptimer_access,
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_ptimer_access,
      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
    },
    /* Secure timer -- this is actually restricted to only EL3
     * and configurably Secure-EL1 via the accessfn.
     */
    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .readfn = gt_sec_tval_read,
      .writefn = gt_sec_tval_write,
      .resetfn = gt_sec_timer_reset,
    },
    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
      .resetvalue = 0,
      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
      .type = ARM_CP_IO, .access = PL1_RW,
      .accessfn = gt_stimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};
2225
2226#else
2227
2228/* In user-mode most of the generic timer registers are inaccessible
2229 * however modern kernels (4.12+) allow access to cntvct_el0
2230 */
2231
static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Currently we have no support for QEMUTimer in linux-user so we
     * can't call gt_get_countervalue(env), instead we directly
     * call the lower level functions.
     */
    return cpu_get_clock() / GTIMER_SCALE; /* ns -> counter ticks */
}
2240
/* linux-user subset: CNTFRQ_EL0 as a constant and a readable CNTVCT_EL0 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
      .readfn = gt_virt_cnt_read,
    },
    REGINFO_SENTINEL
};
2255
2256#endif
2257
2258static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2259{
2260    if (arm_feature(env, ARM_FEATURE_LPAE)) {
2261        raw_write(env, ri, value);
2262    } else if (arm_feature(env, ARM_FEATURE_V7)) {
2263        raw_write(env, ri, value & 0xfffff6ff);
2264    } else {
2265        raw_write(env, ri, value & 0xfffff1ff);
2266    }
2267}
2268
2269#ifndef CONFIG_USER_ONLY
2270/* get_phys_addr() isn't present for user-mode-only targets */
2271
2272static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2273                                 bool isread)
2274{
2275    if (ri->opc2 & 4) {
2276        /* The ATS12NSO* operations must trap to EL3 if executed in
2277         * Secure EL1 (which can only happen if EL3 is AArch64).
2278         * They are simply UNDEF if executed from NS EL1.
2279         * They function normally from EL2 or EL3.
2280         */
2281        if (arm_current_el(env) == 1) {
2282            if (arm_is_secure_below_el3(env)) {
2283                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2284            }
2285            return CP_ACCESS_TRAP_UNCATEGORIZED;
2286        }
2287    }
2288    return CP_ACCESS_OK;
2289}
2290
/* Perform an address-translation (AT/ATS) operation for @value in the
 * regime selected by @mmu_idx and return the resulting PAR value,
 * encoded in either the 32-bit or the 64-bit PAR format depending on
 * the CPU state and translation regime.
 */
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    bool ret;                  /* true on translation fault */
    uint64_t par64;
    bool format64 = false;     /* use the 64-bit (LPAE) PAR format? */
    MemTxAttrs attrs = {};
    ARMMMUFaultInfo fi = {};
    ARMCacheAttrs cacheattrs = {};

    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
                        &prot, &page_size, &fi, &cacheattrs);

    if (is_a64(env)) {
        format64 = true;
    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /*
         * ATS1Cxx:
         * * TTBCR.EAE determines whether the result is returned using the
         *   32-bit or the 64-bit PAR format
         * * Instructions executed in Hyp mode always use the 64bit format
         *
         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
         * * The Non-secure TTBCR.EAE bit is set to 1
         * * The implementation includes EL2, and the value of HCR.VM is 1
         *
         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
         *
         * ATS1Hx always uses the 64bit format.
         */
        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);

        if (arm_feature(env, ARM_FEATURE_EL2)) {
            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
            } else {
                format64 |= arm_current_el(env) == 2;
            }
        }
    }

    if (format64) {
        /* Create a 64-bit PAR */
        par64 = (1 << 11); /* LPAE bit always set */
        if (!ret) {
            /* Successful translation: PA, security, and cache attributes */
            par64 |= phys_addr & ~0xfffULL;
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
            par64 |= cacheattrs.shareability << 7; /* SH */
        } else {
            uint32_t fsr = arm_fi_to_lfsc(&fi);

            par64 |= 1; /* F */
            par64 |= (fsr & 0x3f) << 1; /* FS */
            if (fi.stage2) {
                par64 |= (1 << 9); /* S */
            }
            if (fi.s1ptw) {
                par64 |= (1 << 8); /* PTW */
            }
        }
    } else {
        /* fsr is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (!ret) {
            /* We do not set any attribute bits in the PAR */
            /* 16MB supersections use a different PA field layout on v7+ */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                par64 = (phys_addr & 0xff000000) | (1 << 1);
            } else {
                par64 = phys_addr & 0xfffff000;
            }
            if (!attrs.secure) {
                par64 |= (1 << 9); /* NS */
            }
        } else {
            uint32_t fsr = arm_fi_to_sfsc(&fi);

            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
                    ((fsr & 0xf) << 1) | 1;
        }
    }
    return par64;
}
2382
/* AArch32 ATS* operations: select the MMU index matching the requested
 * translation regime and the current EL/security state, perform the
 * translation, and store the result in the (banked) PAR.
 */
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* opc2 bit 0 selects whether this is a read or a write translation */
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    uint64_t par64;
    ARMMMUIdx mmu_idx;
    int el = arm_current_el(env);
    bool secure = arm_is_secure_below_el3(env);

    /* opc2 bits [2:1] select the translation regime */
    switch (ri->opc2 & 6) {
    case 0:
        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE1;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2:
        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
        switch (el) {
        case 3:
            mmu_idx = ARMMMUIdx_S1SE0;
            break;
        case 2:
            mmu_idx = ARMMMUIdx_S1NSE0;
            break;
        case 1:
            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 4:
        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
        mmu_idx = ARMMMUIdx_S12NSE1;
        break;
    case 6:
        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
        mmu_idx = ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    par64 = do_ats_write(env, value, access_type, mmu_idx);

    A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
2440
2441static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2442                        uint64_t value)
2443{
2444    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
2445    uint64_t par64;
2446
2447    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);
2448
2449    A32_BANKED_CURRENT_REG_SET(env, par, par64);
2450}
2451
2452static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2453                                     bool isread)
2454{
2455    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2456        return CP_ACCESS_TRAP;
2457    }
2458    return CP_ACCESS_OK;
2459}
2460
/* Handle the AArch64 AT (address translate) instructions: decode the
 * target translation regime from the instruction's opc1/opc2 encoding,
 * perform the lookup and store the result in PAR_EL1.
 */
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* opc2 bit 0 selects the write (store) vs read (load) form. */
    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
    ARMMMUIdx mmu_idx;
    int secure = arm_is_secure_below_el3(env);

    switch (ri->opc2 & 6) {
    case 0:
        switch (ri->opc1) {
        case 0: /* AT S1E1R, AT S1E1W */
            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
            break;
        case 4: /* AT S1E2R, AT S1E2W */
            mmu_idx = ARMMMUIdx_S1E2;
            break;
        case 6: /* AT S1E3R, AT S1E3W */
            mmu_idx = ARMMMUIdx_S1E3;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 2: /* AT S1E0R, AT S1E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
        break;
    case 4: /* AT S12E1R, AT S12E1W */
        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
        break;
    case 6: /* AT S12E0R, AT S12E0W */
        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
        break;
    default:
        g_assert_not_reached();
    }

    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
}
2499#endif
2500
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    /* PAR: result register for the ATS* operations; banked between the
     * Secure and Non-secure states.
     */
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
                             offsetoflow32(CPUARMState, cp15.par_ns) },
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    /* This underdecoding is safe because the reginfo is NO_RAW. */
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_RAW },
#endif
    REGINFO_SENTINEL
};
2515
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    /* Each region occupies a 4-bit field in the extended layout but
     * only a 2-bit field in the basic one: keep the low two bits of
     * each 4-bit field and pack them into consecutive 2-bit fields.
     */
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        ret |= ((val >> (region * 4)) & 3) << (region * 2);
    }
    return ret;
}
2530
/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    /* Inverse of simple_mpu_ap_bits: spread each 2-bit field out into
     * the low two bits of the corresponding 4-bit field.
     */
    uint32_t ret = 0;
    int region;

    for (region = 0; region < 8; region++) {
        ret |= ((val >> (region * 2)) & 3) << (region * 4);
    }
    return ret;
}
2545
2546static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2547                                 uint64_t value)
2548{
2549    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2550}
2551
2552static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2553{
2554    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2555}
2556
2557static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2558                                 uint64_t value)
2559{
2560    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2561}
2562
2563static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2564{
2565    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2566}
2567
2568static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2569{
2570    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2571
2572    if (!u32p) {
2573        return 0;
2574    }
2575
2576    u32p += env->pmsav7.rnr[M_REG_NS];
2577    return *u32p;
2578}
2579
2580static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2581                         uint64_t value)
2582{
2583    ARMCPU *cpu = arm_env_get_cpu(env);
2584    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2585
2586    if (!u32p) {
2587        return;
2588    }
2589
2590    u32p += env->pmsav7.rnr[M_REG_NS];
2591    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2592    *u32p = value;
2593}
2594
2595static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2596                              uint64_t value)
2597{
2598    ARMCPU *cpu = arm_env_get_cpu(env);
2599    uint32_t nrgs = cpu->pmsav7_dregion;
2600
2601    if (value >= nrgs) {
2602        qemu_log_mask(LOG_GUEST_ERROR,
2603                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2604                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
2605        return;
2606    }
2607
2608    raw_write(env, ri, value);
2609}
2610
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
    /* Reset for all these registers is handled in arm_cpu_reset(),
     * because the PMSAv7 is also used by M-profile CPUs, which do
     * not register cpregs but still need the state to be reset.
     */
    /* DRBAR/DRSR/DRACR share the pmsav7_read/pmsav7_write accessors,
     * which index the per-region array named by .fieldoffset using RGNR.
     */
    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
      .access = PL1_RW, .type = ARM_CP_NO_RAW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
      .readfn = pmsav7_read, .writefn = pmsav7_write,
      .resetfn = arm_cp_reset_ignore },
    /* RGNR: the region selector; range-checked by pmsav7_rgnr_write. */
    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
      .writefn = pmsav7_rgnr_write,
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
2638
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    /* DATA_AP/INSN_AP present the basic AP format; the state itself is
     * kept in the extended format, converted by the read/write fns.
     */
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    /* The *_EXT_AP views access the same state directly, untranslated. */
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
      .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
      .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
2689
/* Raw (no-TLB-flush) TTBCR write: sanitize the value for the CPU's
 * architecture level and update the cached translation masks.
 */
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    TCR *tcr = raw_ptr(env, ri);
    int maskshift = extract32(value, 0, 3); /* TTBCR.N */

    if (!arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
             * using Long-descriptor translation table format */
            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
            /* In an implementation that includes the Security Extensions
             * TTBCR has additional fields PD0 [4] and PD1 [5] for
             * Short-descriptor translation table format.
             */
            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
        } else {
            value &= TTBCR_N;
        }
    }

    /* Update the masks corresponding to the TCR bank being written
     * Note that we always calculate mask and base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TCR fields are used differently
     * and the mask and base_mask values are meaningless.
     */
    tcr->raw_tcr = value;
    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
2722
2723static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2724                             uint64_t value)
2725{
2726    ARMCPU *cpu = arm_env_get_cpu(env);
2727
2728    if (arm_feature(env, ARM_FEATURE_LPAE)) {
2729        /* With LPAE the TTBCR could result in a change of ASID
2730         * via the TTBCR.A1 bit, so do a TLB flush.
2731         */
2732        tlb_flush(CPU(cpu));
2733    }
2734    vmsa_ttbcr_raw_write(env, ri, value);
2735}
2736
2737static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2738{
2739    TCR *tcr = raw_ptr(env, ri);
2740
2741    /* Reset both the TCR as well as the masks corresponding to the bank of
2742     * the TCR being reset.
2743     */
2744    tcr->raw_tcr = 0;
2745    tcr->mask = 0;
2746    tcr->base_mask = 0xffffc000u;
2747}
2748
2749static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2750                               uint64_t value)
2751{
2752    ARMCPU *cpu = arm_env_get_cpu(env);
2753    TCR *tcr = raw_ptr(env, ri);
2754
2755    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2756    tlb_flush(CPU(cpu));
2757    tcr->raw_tcr = value;
2758}
2759
2760static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2761                            uint64_t value)
2762{
2763    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
2764    if (cpreg_field_is_64bit(ri) &&
2765        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
2766        ARMCPU *cpu = arm_env_get_cpu(env);
2767        tlb_flush(CPU(cpu));
2768    }
2769    raw_write(env, ri, value);
2770}
2771
2772static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2773                        uint64_t value)
2774{
2775    ARMCPU *cpu = arm_env_get_cpu(env);
2776    CPUState *cs = CPU(cpu);
2777
2778    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
2779    if (raw_read(env, ri) != value) {
2780        tlb_flush_by_mmuidx(cs,
2781                            ARMMMUIdxBit_S12NSE1 |
2782                            ARMMMUIdxBit_S12NSE0 |
2783                            ARMMMUIdxBit_S2NS);
2784        raw_write(env, ri, value);
2785    }
2786}
2787
/* Fault status/address registers common to VMSA and PMSA cores. */
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
                             offsetof(CPUARMState, cp15.dfar_ns) } },
    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
2807
/* Translation-table base and control registers for VMSA cores. */
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
    /* The 32-bit TTBCR view needs the raw writefn so migration restores
     * the cached mask/base_mask derived state as well.
     */
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
      .raw_writefn = vmsa_ttbcr_raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
    REGINFO_SENTINEL
};
2835
2836static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2837                                uint64_t value)
2838{
2839    env->cp15.c15_ticonfig = value & 0xe7;
2840    /* The OS_TYPE bit in this register changes the reported CPUID! */
2841    env->cp15.c0_cpuid = (value & (1 << 5)) ?
2842        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2843}
2844
2845static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2846                                uint64_t value)
2847{
2848    env->cp15.c15_threadid = value & 0xffff;
2849}
2850
2851static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2852                           uint64_t value)
2853{
2854    /* Wait-for-interrupt (deprecated) */
2855    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
2856}
2857
2858static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2859                                  uint64_t value)
2860{
2861    /* On OMAP there are registers indicating the max/min index of dcache lines
2862     * containing a dirty line; cache flush operations have to reset these.
2863     */
2864    env->cp15.c15_i_max = 0x000;
2865    env->cp15.c15_i_min = 0xff0;
2866}
2867
/* OMAP/TI925T implementation-defined cp15 registers. */
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
      .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    /* Reads as zero; a write triggers the deprecated WFI behaviour. */
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_RAW,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
2907
2908static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2909                              uint64_t value)
2910{
2911    env->cp15.c15_cpar = value & 0x3fff;
2912}
2913
/* XScale implementation-defined cp15 registers. */
static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    /* XScale specific cache-lockdown: since we have no cache we NOP these
     * and hope the guest does not really rely on cache behaviour.
     */
    { .name = "XSCALE_LOCK_ICACHE_LINE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_ICACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "XSCALE_DCACHE_LOCK",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "XSCALE_UNLOCK_DCACHE",
      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
2940
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2954
static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
2962
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
2983
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
2996
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
3005
3006static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3007{
3008    ARMCPU *cpu = arm_env_get_cpu(env);
3009    unsigned int cur_el = arm_current_el(env);
3010    bool secure = arm_is_secure(env);
3011
3012    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3013        return env->cp15.vpidr_el2;
3014    }
3015    return raw_read(env, ri);
3016}
3017
3018static uint64_t mpidr_read_val(CPUARMState *env)
3019{
3020    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
3021    uint64_t mpidr = cpu->mp_affinity;
3022
3023    if (arm_feature(env, ARM_FEATURE_V7MP)) {
3024        mpidr |= (1U << 31);
3025        /* Cores which are uniprocessor (non-coherent)
3026         * but still implement the MP extensions set
3027         * bit 30. (For instance, Cortex-R5).
3028         */
3029        if (cpu->mp_is_up) {
3030            mpidr |= (1u << 30);
3031        }
3032    }
3033    return mpidr;
3034}
3035
3036static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3037{
3038    unsigned int cur_el = arm_current_el(env);
3039    bool secure = arm_is_secure(env);
3040
3041    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3042        return env->cp15.vmpidr_el2;
3043    }
3044    return mpidr_read_val(env);
3045}
3046
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    /* MPIDR is computed from cpu->mp_affinity, so it is NO_RAW. */
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
    REGINFO_SENTINEL
};
3053
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1 */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* 64-bit (mcrr/mrrc) view of PAR, banked by security state. */
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
                             offsetof(CPUARMState, cp15.par_ns)} },
    /* 64-bit views of the TTBRs; ALIAS since the 32-bit views migrate. */
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
                             offsetof(CPUARMState, cp15.ttbr0_ns) },
      .writefn = vmsa_ttbr_write, },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
                             offsetof(CPUARMState, cp15.ttbr1_ns) },
      .writefn = vmsa_ttbr_write, },
    REGINFO_SENTINEL
};
3080
3081static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3082{
3083    return vfp_get_fpcr(env);
3084}
3085
3086static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3087                            uint64_t value)
3088{
3089    vfp_set_fpcr(env, value);
3090}
3091
3092static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3093{
3094    return vfp_get_fpsr(env);
3095}
3096
3097static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3098                            uint64_t value)
3099{
3100    vfp_set_fpsr(env, value);
3101}
3102
3103static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
3104                                       bool isread)
3105{
3106    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
3107        return CP_ACCESS_TRAP;
3108    }
3109    return CP_ACCESS_OK;
3110}
3111
3112static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
3113                            uint64_t value)
3114{
3115    env->daif = value & PSTATE_DAIF;
3116}
3117
3118static CPAccessResult aa64_cacheop_access(CPUARMState *env,
3119                                          const ARMCPRegInfo *ri,
3120                                          bool isread)
3121{
3122    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
3123     * SCTLR_EL1.UCI is set.
3124     */
3125    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
3126        return CP_ACCESS_TRAP;
3127    }
3128    return CP_ACCESS_OK;
3129}
3130
3131/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
3132 * Page D4-1736 (DDI0487A.b)
3133 */
3134
3135static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3136                                      uint64_t value)
3137{
3138    CPUState *cs = ENV_GET_CPU(env);
3139    bool sec = arm_is_secure_below_el3(env);
3140
3141    if (sec) {
3142        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3143                                            ARMMMUIdxBit_S1SE1 |
3144                                            ARMMMUIdxBit_S1SE0);
3145    } else {
3146        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3147                                            ARMMMUIdxBit_S12NSE1 |
3148                                            ARMMMUIdxBit_S12NSE0);
3149    }
3150}
3151
3152static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3153                                    uint64_t value)
3154{
3155    CPUState *cs = ENV_GET_CPU(env);
3156
3157    if (tlb_force_broadcast(env)) {
3158        tlbi_aa64_vmalle1is_write(env, NULL, value);
3159        return;
3160    }
3161
3162    if (arm_is_secure_below_el3(env)) {
3163        tlb_flush_by_mmuidx(cs,
3164                            ARMMMUIdxBit_S1SE1 |
3165                            ARMMMUIdxBit_S1SE0);
3166    } else {
3167        tlb_flush_by_mmuidx(cs,
3168                            ARMMMUIdxBit_S12NSE1 |
3169                            ARMMMUIdxBit_S12NSE0);
3170    }
3171}
3172
3173static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3174                                  uint64_t value)
3175{
3176    /* Note that the 'ALL' scope must invalidate both stage 1 and
3177     * stage 2 translations, whereas most other scopes only invalidate
3178     * stage 1 translations.
3179     */
3180    ARMCPU *cpu = arm_env_get_cpu(env);
3181    CPUState *cs = CPU(cpu);
3182
3183    if (arm_is_secure_below_el3(env)) {
3184        tlb_flush_by_mmuidx(cs,
3185                            ARMMMUIdxBit_S1SE1 |
3186                            ARMMMUIdxBit_S1SE0);
3187    } else {
3188        if (arm_feature(env, ARM_FEATURE_EL2)) {
3189            tlb_flush_by_mmuidx(cs,
3190                                ARMMMUIdxBit_S12NSE1 |
3191                                ARMMMUIdxBit_S12NSE0 |
3192                                ARMMMUIdxBit_S2NS);
3193        } else {
3194            tlb_flush_by_mmuidx(cs,
3195                                ARMMMUIdxBit_S12NSE1 |
3196                                ARMMMUIdxBit_S12NSE0);
3197        }
3198    }
3199}
3200
3201static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3202                                  uint64_t value)
3203{
3204    ARMCPU *cpu = arm_env_get_cpu(env);
3205    CPUState *cs = CPU(cpu);
3206
3207    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
3208}
3209
3210static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3211                                  uint64_t value)
3212{
3213    ARMCPU *cpu = arm_env_get_cpu(env);
3214    CPUState *cs = CPU(cpu);
3215
3216    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
3217}
3218
3219static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3220                                    uint64_t value)
3221{
3222    /* Note that the 'ALL' scope must invalidate both stage 1 and
3223     * stage 2 translations, whereas most other scopes only invalidate
3224     * stage 1 translations.
3225     */
3226    CPUState *cs = ENV_GET_CPU(env);
3227    bool sec = arm_is_secure_below_el3(env);
3228    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
3229
3230    if (sec) {
3231        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3232                                            ARMMMUIdxBit_S1SE1 |
3233                                            ARMMMUIdxBit_S1SE0);
3234    } else if (has_el2) {
3235        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3236                                            ARMMMUIdxBit_S12NSE1 |
3237                                            ARMMMUIdxBit_S12NSE0 |
3238                                            ARMMMUIdxBit_S2NS);
3239    } else {
3240          tlb_flush_by_mmuidx_all_cpus_synced(cs,
3241                                              ARMMMUIdxBit_S12NSE1 |
3242                                              ARMMMUIdxBit_S12NSE0);
3243    }
3244}
3245
3246static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3247                                    uint64_t value)
3248{
3249    CPUState *cs = ENV_GET_CPU(env);
3250
3251    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
3252}
3253
3254static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3255                                    uint64_t value)
3256{
3257    CPUState *cs = ENV_GET_CPU(env);
3258
3259    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
3260}
3261
3262static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3263                                 uint64_t value)
3264{
3265    /* Invalidate by VA, EL2
3266     * Currently handles both VAE2 and VALE2, since we don't support
3267     * flush-last-level-only.
3268     */
3269    ARMCPU *cpu = arm_env_get_cpu(env);
3270    CPUState *cs = CPU(cpu);
3271    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3272
3273    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
3274}
3275
3276static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3277                                 uint64_t value)
3278{
3279    /* Invalidate by VA, EL3
3280     * Currently handles both VAE3 and VALE3, since we don't support
3281     * flush-last-level-only.
3282     */
3283    ARMCPU *cpu = arm_env_get_cpu(env);
3284    CPUState *cs = CPU(cpu);
3285    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3286
3287    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
3288}
3289
3290static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3291                                   uint64_t value)
3292{
3293    ARMCPU *cpu = arm_env_get_cpu(env);
3294    CPUState *cs = CPU(cpu);
3295    bool sec = arm_is_secure_below_el3(env);
3296    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3297
3298    if (sec) {
3299        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3300                                                 ARMMMUIdxBit_S1SE1 |
3301                                                 ARMMMUIdxBit_S1SE0);
3302    } else {
3303        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3304                                                 ARMMMUIdxBit_S12NSE1 |
3305                                                 ARMMMUIdxBit_S12NSE0);
3306    }
3307}
3308
3309static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3310                                 uint64_t value)
3311{
3312    /* Invalidate by VA, EL1&0 (AArch64 version).
3313     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
3314     * since we don't support flush-for-specific-ASID-only or
3315     * flush-last-level-only.
3316     */
3317    ARMCPU *cpu = arm_env_get_cpu(env);
3318    CPUState *cs = CPU(cpu);
3319    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3320
3321    if (tlb_force_broadcast(env)) {
3322        tlbi_aa64_vae1is_write(env, NULL, value);
3323        return;
3324    }
3325
3326    if (arm_is_secure_below_el3(env)) {
3327        tlb_flush_page_by_mmuidx(cs, pageaddr,
3328                                 ARMMMUIdxBit_S1SE1 |
3329                                 ARMMMUIdxBit_S1SE0);
3330    } else {
3331        tlb_flush_page_by_mmuidx(cs, pageaddr,
3332                                 ARMMMUIdxBit_S12NSE1 |
3333                                 ARMMMUIdxBit_S12NSE0);
3334    }
3335}
3336
3337static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3338                                   uint64_t value)
3339{
3340    CPUState *cs = ENV_GET_CPU(env);
3341    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3342
3343    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3344                                             ARMMMUIdxBit_S1E2);
3345}
3346
3347static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3348                                   uint64_t value)
3349{
3350    CPUState *cs = ENV_GET_CPU(env);
3351    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3352
3353    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3354                                             ARMMMUIdxBit_S1E3);
3355}
3356
3357static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3358                                    uint64_t value)
3359{
3360    /* Invalidate by IPA. This has to invalidate any structures that
3361     * contain only stage 2 translation information, but does not need
3362     * to apply to structures that contain combined stage 1 and stage 2
3363     * translation information.
3364     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
3365     */
3366    ARMCPU *cpu = arm_env_get_cpu(env);
3367    CPUState *cs = CPU(cpu);
3368    uint64_t pageaddr;
3369
3370    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3371        return;
3372    }
3373
3374    pageaddr = sextract64(value << 12, 0, 48);
3375
3376    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
3377}
3378
3379static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3380                                      uint64_t value)
3381{
3382    CPUState *cs = ENV_GET_CPU(env);
3383    uint64_t pageaddr;
3384
3385    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3386        return;
3387    }
3388
3389    pageaddr = sextract64(value << 12, 0, 48);
3390
3391    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3392                                             ARMMMUIdxBit_S2NS);
3393}
3394
3395static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3396                                      bool isread)
3397{
3398    /* We don't implement EL2, so the only control on DC ZVA is the
3399     * bit in the SCTLR which can prohibit access for EL0.
3400     */
3401    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3402        return CP_ACCESS_TRAP;
3403    }
3404    return CP_ACCESS_OK;
3405}
3406
3407static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3408{
3409    ARMCPU *cpu = arm_env_get_cpu(env);
3410    int dzp_bit = 1 << 4;
3411
3412    /* DZP indicates whether DC ZVA access is allowed */
3413    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3414        dzp_bit = 0;
3415    }
3416    return cpu->dcz_blocksize | dzp_bit;
3417}
3418
3419static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3420                                    bool isread)
3421{
3422    if (!(env->pstate & PSTATE_SP)) {
3423        /* Access to SP_EL0 is undefined if it's being used as
3424         * the stack pointer.
3425         */
3426        return CP_ACCESS_TRAP_UNCATEGORIZED;
3427    }
3428    return CP_ACCESS_OK;
3429}
3430
/* Read SPSel: report whether PSTATE.SP (use SP_ELx, not SP_EL0) is set. */
static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return env->pstate & PSTATE_SP;
}
3435
/* Write SPSel: switch stack-pointer selection via update_spsel(). */
static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
{
    update_spsel(env, val);
}
3440
3441static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3442                        uint64_t value)
3443{
3444    ARMCPU *cpu = arm_env_get_cpu(env);
3445
3446    if (raw_read(env, ri) == value) {
3447        /* Skip the TLB flush if nothing actually changed; Linux likes
3448         * to do a lot of pointless SCTLR writes.
3449         */
3450        return;
3451    }
3452
3453    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
3454        /* M bit is RAZ/WI for PMSA with no MPU implemented */
3455        value &= ~SCTLR_M;
3456    }
3457
3458    raw_write(env, ri, value);
3459    /* ??? Lots of these bits are not implemented.  */
3460    /* This may enable/disable the MMU, so do a TLB flush.  */
3461    tlb_flush(CPU(cpu));
3462}
3463
3464static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
3465                                     bool isread)
3466{
3467    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
3468        return CP_ACCESS_TRAP_FP_EL2;
3469    }
3470    if (env->cp15.cptr_el[3] & CPTR_TFP) {
3471        return CP_ACCESS_TRAP_FP_EL3;
3472    }
3473    return CP_ACCESS_OK;
3474}
3475
/* Write SDCR: only the defined SDCR bits are writable; the value lands in
 * the low half of MDCR_EL3 (SDCR is an alias of it).
 */
static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
}
3481
3482static const ARMCPRegInfo v8_cp_reginfo[] = {
3483    /* Minimal set of EL0-visible registers. This will need to be expanded
3484     * significantly for system emulation of AArch64 CPUs.
3485     */
3486    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
3487      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
3488      .access = PL0_RW, .type = ARM_CP_NZCV },
3489    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
3490      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
3491      .type = ARM_CP_NO_RAW,
3492      .access = PL0_RW, .accessfn = aa64_daif_access,
3493      .fieldoffset = offsetof(CPUARMState, daif),
3494      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
3495    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
3496      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
3497      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
3498      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
3499    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
3500      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
3501      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
3502      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
3503    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
3504      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
3505      .access = PL0_R, .type = ARM_CP_NO_RAW,
3506      .readfn = aa64_dczid_read },
3507    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
3508      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
3509      .access = PL0_W, .type = ARM_CP_DC_ZVA,
3510#ifndef CONFIG_USER_ONLY
3511      /* Avoid overhead of an access check that always passes in user-mode */
3512      .accessfn = aa64_zva_access,
3513#endif
3514    },
3515    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
3516      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
3517      .access = PL1_R, .type = ARM_CP_CURRENTEL },
3518    /* Cache ops: all NOPs since we don't emulate caches */
3519    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
3520      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3521      .access = PL1_W, .type = ARM_CP_NOP },
3522    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
3523      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3524      .access = PL1_W, .type = ARM_CP_NOP },
3525    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
3526      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
3527      .access = PL0_W, .type = ARM_CP_NOP,
3528      .accessfn = aa64_cacheop_access },
3529    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
3530      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3531      .access = PL1_W, .type = ARM_CP_NOP },
3532    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
3533      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3534      .access = PL1_W, .type = ARM_CP_NOP },
3535    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
3536      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
3537      .access = PL0_W, .type = ARM_CP_NOP,
3538      .accessfn = aa64_cacheop_access },
3539    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
3540      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3541      .access = PL1_W, .type = ARM_CP_NOP },
3542    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
3543      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
3544      .access = PL0_W, .type = ARM_CP_NOP,
3545      .accessfn = aa64_cacheop_access },
3546    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
3547      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
3548      .access = PL0_W, .type = ARM_CP_NOP,
3549      .accessfn = aa64_cacheop_access },
3550    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
3551      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3552      .access = PL1_W, .type = ARM_CP_NOP },
3553    /* TLBI operations */
3554    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
3555      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
3556      .access = PL1_W, .type = ARM_CP_NO_RAW,
3557      .writefn = tlbi_aa64_vmalle1is_write },
3558    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
3559      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
3560      .access = PL1_W, .type = ARM_CP_NO_RAW,
3561      .writefn = tlbi_aa64_vae1is_write },
3562    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
3563      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
3564      .access = PL1_W, .type = ARM_CP_NO_RAW,
3565      .writefn = tlbi_aa64_vmalle1is_write },
3566    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
3567      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
3568      .access = PL1_W, .type = ARM_CP_NO_RAW,
3569      .writefn = tlbi_aa64_vae1is_write },
3570    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
3571      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3572      .access = PL1_W, .type = ARM_CP_NO_RAW,
3573      .writefn = tlbi_aa64_vae1is_write },
3574    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
3575      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3576      .access = PL1_W, .type = ARM_CP_NO_RAW,
3577      .writefn = tlbi_aa64_vae1is_write },
3578    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
3579      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
3580      .access = PL1_W, .type = ARM_CP_NO_RAW,
3581      .writefn = tlbi_aa64_vmalle1_write },
3582    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
3583      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
3584      .access = PL1_W, .type = ARM_CP_NO_RAW,
3585      .writefn = tlbi_aa64_vae1_write },
3586    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
3587      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
3588      .access = PL1_W, .type = ARM_CP_NO_RAW,
3589      .writefn = tlbi_aa64_vmalle1_write },
3590    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
3591      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
3592      .access = PL1_W, .type = ARM_CP_NO_RAW,
3593      .writefn = tlbi_aa64_vae1_write },
3594    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
3595      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3596      .access = PL1_W, .type = ARM_CP_NO_RAW,
3597      .writefn = tlbi_aa64_vae1_write },
3598    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
3599      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3600      .access = PL1_W, .type = ARM_CP_NO_RAW,
3601      .writefn = tlbi_aa64_vae1_write },
3602    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
3603      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3604      .access = PL2_W, .type = ARM_CP_NO_RAW,
3605      .writefn = tlbi_aa64_ipas2e1is_write },
3606    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
3607      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3608      .access = PL2_W, .type = ARM_CP_NO_RAW,
3609      .writefn = tlbi_aa64_ipas2e1is_write },
3610    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
3611      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3612      .access = PL2_W, .type = ARM_CP_NO_RAW,
3613      .writefn = tlbi_aa64_alle1is_write },
3614    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
3615      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
3616      .access = PL2_W, .type = ARM_CP_NO_RAW,
3617      .writefn = tlbi_aa64_alle1is_write },
3618    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
3619      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3620      .access = PL2_W, .type = ARM_CP_NO_RAW,
3621      .writefn = tlbi_aa64_ipas2e1_write },
3622    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
3623      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3624      .access = PL2_W, .type = ARM_CP_NO_RAW,
3625      .writefn = tlbi_aa64_ipas2e1_write },
3626    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
3627      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3628      .access = PL2_W, .type = ARM_CP_NO_RAW,
3629      .writefn = tlbi_aa64_alle1_write },
3630    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
3631      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
3632      .access = PL2_W, .type = ARM_CP_NO_RAW,
3633      .writefn = tlbi_aa64_alle1is_write },
3634#ifndef CONFIG_USER_ONLY
3635    /* 64 bit address translation operations */
3636    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
3637      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
3638      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3639    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
3640      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
3641      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3642    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
3643      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
3644      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3645    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
3646      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
3647      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3648    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
3649      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
3650      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3651    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
3652      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
3653      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3654    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
3655      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
3656      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3657    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
3658      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
3659      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3660    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
3661    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
3662      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
3663      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3664    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
3665      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
3666      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3667    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
3668      .type = ARM_CP_ALIAS,
3669      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
3670      .access = PL1_RW, .resetvalue = 0,
3671      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
3672      .writefn = par_write },
3673#endif
3674    /* TLB invalidate last level of translation table walk */
3675    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3676      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
3677    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3678      .type = ARM_CP_NO_RAW, .access = PL1_W,
3679      .writefn = tlbimvaa_is_write },
3680    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3681      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
3682    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3683      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
3684    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3685      .type = ARM_CP_NO_RAW, .access = PL2_W,
3686      .writefn = tlbimva_hyp_write },
3687    { .name = "TLBIMVALHIS",
3688      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3689      .type = ARM_CP_NO_RAW, .access = PL2_W,
3690      .writefn = tlbimva_hyp_is_write },
3691    { .name = "TLBIIPAS2",
3692      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3693      .type = ARM_CP_NO_RAW, .access = PL2_W,
3694      .writefn = tlbiipas2_write },
3695    { .name = "TLBIIPAS2IS",
3696      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3697      .type = ARM_CP_NO_RAW, .access = PL2_W,
3698      .writefn = tlbiipas2_is_write },
3699    { .name = "TLBIIPAS2L",
3700      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3701      .type = ARM_CP_NO_RAW, .access = PL2_W,
3702      .writefn = tlbiipas2_write },
3703    { .name = "TLBIIPAS2LIS",
3704      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3705      .type = ARM_CP_NO_RAW, .access = PL2_W,
3706      .writefn = tlbiipas2_is_write },
3707    /* 32 bit cache operations */
3708    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3709      .type = ARM_CP_NOP, .access = PL1_W },
3710    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
3711      .type = ARM_CP_NOP, .access = PL1_W },
3712    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3713      .type = ARM_CP_NOP, .access = PL1_W },
3714    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
3715      .type = ARM_CP_NOP, .access = PL1_W },
3716    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
3717      .type = ARM_CP_NOP, .access = PL1_W },
3718    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
3719      .type = ARM_CP_NOP, .access = PL1_W },
3720    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3721      .type = ARM_CP_NOP, .access = PL1_W },
3722    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3723      .type = ARM_CP_NOP, .access = PL1_W },
3724    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
3725      .type = ARM_CP_NOP, .access = PL1_W },
3726    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3727      .type = ARM_CP_NOP, .access = PL1_W },
3728    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
3729      .type = ARM_CP_NOP, .access = PL1_W },
3730    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
3731      .type = ARM_CP_NOP, .access = PL1_W },
3732    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3733      .type = ARM_CP_NOP, .access = PL1_W },
3734    /* MMU Domain access control / MPU write buffer control */
3735    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
3736      .access = PL1_RW, .resetvalue = 0,
3737      .writefn = dacr_write, .raw_writefn = raw_write,
3738      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
3739                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
3740    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
3741      .type = ARM_CP_ALIAS,
3742      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
3743      .access = PL1_RW,
3744      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
3745    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
3746      .type = ARM_CP_ALIAS,
3747      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
3748      .access = PL1_RW,
3749      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
3750    /* We rely on the access checks not allowing the guest to write to the
3751     * state field when SPSel indicates that it's being used as the stack
3752     * pointer.
3753     */
3754    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
3755      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
3756      .access = PL1_RW, .accessfn = sp_el0_access,
3757      .type = ARM_CP_ALIAS,
3758      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
3759    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
3760      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
3761      .access = PL2_RW, .type = ARM_CP_ALIAS,
3762      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
3763    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
3764      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
3765      .type = ARM_CP_NO_RAW,
3766      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
3767    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
3768      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
3769      .type = ARM_CP_ALIAS,
3770      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
3771      .access = PL2_RW, .accessfn = fpexc32_access },
3772    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
3773      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
3774      .access = PL2_RW, .resetvalue = 0,
3775      .writefn = dacr_write, .raw_writefn = raw_write,
3776      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
3777    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
3778      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
3779      .access = PL2_RW, .resetvalue = 0,
3780      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
3781    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
3782      .type = ARM_CP_ALIAS,
3783      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
3784      .access = PL2_RW,
3785      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
3786    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
3787      .type = ARM_CP_ALIAS,
3788      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
3789      .access = PL2_RW,
3790      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
3791    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
3792      .type = ARM_CP_ALIAS,
3793      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
3794      .access = PL2_RW,
3795      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
3796    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
3797      .type = ARM_CP_ALIAS,
3798      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
3799      .access = PL2_RW,
3800      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
3801    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
3802      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
3803      .resetvalue = 0,
3804      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
3805    { .name = "SDCR", .type = ARM_CP_ALIAS,
3806      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
3807      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3808      .writefn = sdcr_write,
3809      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
3810    REGINFO_SENTINEL
3811};
3812
3813/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
3814static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
3815    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
3816      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3817      .access = PL2_RW,
3818      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3819    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
3820      .type = ARM_CP_NO_RAW,
3821      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3822      .access = PL2_RW,
3823      .type = ARM_CP_CONST, .resetvalue = 0 },
3824    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
3825      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
3826      .access = PL2_RW,
3827      .type = ARM_CP_CONST, .resetvalue = 0 },
3828    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3829      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3830      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3831    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3832      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3833      .access = PL2_RW, .type = ARM_CP_CONST,
3834      .resetvalue = 0 },
3835    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3836      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3837      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3838    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3839      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3840      .access = PL2_RW, .type = ARM_CP_CONST,
3841      .resetvalue = 0 },
3842    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
3843      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3844      .access = PL2_RW, .type = ARM_CP_CONST,
3845      .resetvalue = 0 },
3846    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3847      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3848      .access = PL2_RW, .type = ARM_CP_CONST,
3849      .resetvalue = 0 },
3850    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3851      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3852      .access = PL2_RW, .type = ARM_CP_CONST,
3853      .resetvalue = 0 },
3854    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3855      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3856      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3857    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
3858      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3859      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3860      .type = ARM_CP_CONST, .resetvalue = 0 },
3861    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3862      .cp = 15, .opc1 = 6, .crm = 2,
3863      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3864      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
3865    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3866      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3867      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3868    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3869      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3870      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3871    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3872      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3873      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3874    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3875      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3876      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3877    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3878      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3879      .resetvalue = 0 },
3880    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3881      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3882      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3883    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3884      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3885      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3886    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3887      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3888      .resetvalue = 0 },
3889    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3890      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3891      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3892    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3893      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3894      .resetvalue = 0 },
3895    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3896      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3897      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3898    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3899      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3900      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3901    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3902      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3903      .access = PL2_RW, .accessfn = access_tda,
3904      .type = ARM_CP_CONST, .resetvalue = 0 },
3905    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
3906      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3907      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3908      .type = ARM_CP_CONST, .resetvalue = 0 },
3909    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
3910      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3911      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3912    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
3913      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
3914      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3915    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
3916      .type = ARM_CP_CONST,
3917      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
3918      .access = PL2_RW, .resetvalue = 0 },
3919    REGINFO_SENTINEL
3920};
3921
/* As el3_no_el2_cp_reginfo above, but for registers which exist in
 * ARMv8 but not v7: constant-zero (ARM_CP_CONST) stand-ins for EL2
 * registers, used when EL3 is present but EL2 is not.
 */
static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
    /* HCR2 is the AArch32 view of the top half of HCR_EL2. */
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
3930
3931static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3932{
3933    ARMCPU *cpu = arm_env_get_cpu(env);
3934    uint64_t valid_mask = HCR_MASK;
3935
3936    if (arm_feature(env, ARM_FEATURE_EL3)) {
3937        valid_mask &= ~HCR_HCD;
3938    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
3939        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
3940         * However, if we're using the SMC PSCI conduit then QEMU is
3941         * effectively acting like EL3 firmware and so the guest at
3942         * EL2 should retain the ability to prevent EL1 from being
3943         * able to make SMC calls into the ersatz firmware, so in
3944         * that case HCR.TSC should be read/write.
3945         */
3946        valid_mask &= ~HCR_TSC;
3947    }
3948
3949    /* Clear RES0 bits.  */
3950    value &= valid_mask;
3951
3952    /* These bits change the MMU setup:
3953     * HCR_VM enables stage 2 translation
3954     * HCR_PTW forbids certain page-table setups
3955     * HCR_DC Disables stage1 and enables stage2 translation
3956     */
3957    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
3958        tlb_flush(CPU(cpu));
3959    }
3960    env->cp15.hcr_el2 = value;
3961
3962    /*
3963     * Updates to VI and VF require us to update the status of
3964     * virtual interrupts, which are the logical OR of these bits
3965     * and the state of the input lines from the GIC. (This requires
3966     * that we have the iothread lock, which is done by marking the
3967     * reginfo structs as ARM_CP_IO.)
3968     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
3969     * possible for it to be taken immediately, because VIRQ and
3970     * VFIQ are masked unless running at EL0 or EL1, and HCR
3971     * can only be written at EL2.
3972     */
3973    g_assert(qemu_mutex_iothread_locked());
3974    arm_cpu_update_virq(cpu);
3975    arm_cpu_update_vfiq(cpu);
3976}
3977
3978static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
3979                          uint64_t value)
3980{
3981    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
3982    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
3983    hcr_write(env, NULL, value);
3984}
3985
3986static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
3987                         uint64_t value)
3988{
3989    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
3990    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
3991    hcr_write(env, NULL, value);
3992}
3993
/* Registers defined when EL2 is implemented: real (state-backed)
 * implementations, unlike the constant-zero stubs in
 * el3_no_el2_cp_reginfo above.
 */
static const ARMCPRegInfo el2_cp_reginfo[] = {
    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_write },
    { .name = "HCR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writelow },
    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
    /* HIFAR is the AArch32 view of FAR_EL2[63:32]. */
    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
      .resetvalue = 0 },
    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
      .resetvalue = 0 },
    /* HMAIR1 is the AArch32 view of MAIR_EL2[63:32]. */
    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .type = ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
      .access = PL2_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
       */
      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 6, .crm = 2,
      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
      .writefn = vttbr_write },
    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
      .access = PL2_RW, .writefn = vttbr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    /* HTTBR is the 64-bit AArch32 view of TTBR0_EL2. */
    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
    { .name = "TLBIALLNSNH",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_write },
    { .name = "TLBIALLNSNHIS",
      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_nsnh_is_write },
    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_write },
    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbiall_hyp_is_write },
    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_write },
    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbimva_hyp_is_write },
    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_alle2_write },
    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2_write },
    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle2is_write },
    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL2_W,
      .writefn = tlbi_aa64_vae2is_write },
    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL2_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae2is_write },
#ifndef CONFIG_USER_ONLY
    /* Unlike the other EL2-related AT operations, these must
     * UNDEF from EL3 if EL2 is not implemented, which is why we
     * define them here rather than with the rest of the AT ops.
     */
    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W, .accessfn = at_s1e2_access,
      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
     * to behave as if SCR.NS was 1.
     */
    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
      .access = PL2_W,
      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
       * reset values as IMPDEF. We choose to reset to 3 to comply with
       * both ARMv7 and ARMv8.
       */
      .access = PL2_RW, .resetvalue = 3,
      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
      .writefn = gt_cntvoff_write,
      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .type = ARM_CP_IO, .access = PL2_RW,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
      .resetfn = gt_hyp_timer_reset,
      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
      .type = ARM_CP_IO,
      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
      .resetvalue = 0,
      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
    /* The only field of MDCR_EL2 that has a defined architectural reset value
     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
     * don't implement any PMU event counters, so using zero as a reset
     * value for MDCR_EL2 is okay
     */
    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL2_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW, .accessfn = access_el3_aa32ns,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
      .access = PL2_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
    REGINFO_SENTINEL
};
4238
/* EL2 registers which exist in ARMv8 but not v7, for CPUs where EL2
 * is actually implemented (cf. el3_no_el2_v8_cp_reginfo).
 */
static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
    /* HCR2 is the AArch32 view of HCR_EL2[63:32]; writes go through
     * hcr_writehigh so the full 64-bit HCR write path is used.
     */
    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
      .type = ARM_CP_ALIAS | ARM_CP_IO,
      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
      .access = PL2_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
      .writefn = hcr_writehigh },
    REGINFO_SENTINEL
};
4248
4249static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
4250                                   bool isread)
4251{
4252    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
4253     * At Secure EL1 it traps to EL3.
4254     */
4255    if (arm_current_el(env) == 3) {
4256        return CP_ACCESS_OK;
4257    }
4258    if (arm_is_secure_below_el3(env)) {
4259        return CP_ACCESS_TRAP_EL3;
4260    }
4261    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
4262    if (isread) {
4263        return CP_ACCESS_OK;
4264    }
4265    return CP_ACCESS_TRAP_UNCATEGORIZED;
4266}
4267
/* Registers defined when EL3 is implemented, including the AArch32
 * Secure-bank views (SCR, SDER, MVBAR) of the corresponding EL3
 * registers.
 */
static const ARMCPRegInfo el3_cp_reginfo[] = {
    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
    { .name = "SCR",  .type = ARM_CP_ALIAS,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
      .writefn = scr_write },
    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
    { .name = "SDER",
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .writefn = vbar_write, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
      .access = PL3_RW,
      /* no .writefn needed as this can't cause an ASID change;
       * we must provide a .raw_writefn and .resetfn because we handle
       * reset and migration for the AArch32 TTBCR(S), which might be
       * using mask and base_mask.
       */
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
      .type = ARM_CP_ALIAS,
      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
      .access = PL3_RW,
      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
      .access = PL3_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
      .resetvalue = 0 },
    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
      .access = PL3_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL3_RW, .type = ARM_CP_CONST,
      .resetvalue = 0 },
    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3is_write },
    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3is_write },
    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_alle3_write },
    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL3_W, .type = ARM_CP_NO_RAW,
      .writefn = tlbi_aa64_vae3_write },
    REGINFO_SENTINEL
};
4371
4372static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4373                                     bool isread)
4374{
4375    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
4376     * but the AArch32 CTR has its own reginfo struct)
4377     */
4378    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
4379        return CP_ACCESS_TRAP;
4380    }
4381    return CP_ACCESS_OK;
4382}
4383
4384static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4385                        uint64_t value)
4386{
4387    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
4388     * read via a bit in OSLSR_EL1.
4389     */
4390    int oslock;
4391
4392    if (ri->state == ARM_CP_STATE_AA32) {
4393        oslock = (value == 0xC5ACCE55);
4394    } else {
4395        oslock = value & 1;
4396    }
4397
4398    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
4399}
4400
/* Debug system registers.  Most are RAZ/WI or NOP stubs because we
 * don't model the corresponding debug hardware; see the per-register
 * comments below.
 */
static const ARMCPRegInfo debug_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
     * unlike DBGDRAR it is never accessible from EL0.
     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
     * accessor.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
      .access = PL1_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .accessfn = access_tdra,
      .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
      .resetvalue = 0 },
    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
     * We don't implement the configurable EL0 access.
     */
    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
      .type = ARM_CP_ALIAS,
      .access = PL1_R, .accessfn = access_tda,
      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NO_RAW,
      .accessfn = access_tdosa,
      .writefn = oslar_write },
    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
      /* Reset value 10 == 0b1010; bit 1 is the OS lock status that
       * oslar_write() updates.  NOTE(review): presumably the other set
       * bit is OSLM ("lock implemented") -- confirm against the
       * OSLSR_EL1 field layout.
       */
      .access = PL1_R, .resetvalue = 10,
      .accessfn = access_tdosa,
      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
      .access = PL1_RW, .accessfn = access_tdosa,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
     * implement vector catch debug events yet.
     */
    { .name = "DBGVCR",
      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
     * to save and restore a 32-bit guest's DBGVCR)
     */
    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
      .access = PL2_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
     * Channel but Linux may try to access this register. The 32-bit
     * alias is DBGDCCINT.
     */
    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_tda,
      .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
4471
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
    /* 64 bit access versions of the (dummy) debug registers; these are
     * the LPAE MRRC/MCRR forms of DBGDRAR/DBGDSAR, RAZ/WI as above.
     */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    REGINFO_SENTINEL
};
4480
/* Return the exception level to which exceptions should be taken
 * via SVEAccessTrap.  If an exception should be routed through
 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
 * take care of raising that exception.
 * C.f. the ARM pseudocode function CheckSVEEnabled.
 */
int sve_exception_el(CPUARMState *env, int el)
{
#ifndef CONFIG_USER_ONLY
    if (el <= 1) {
        bool disabled = false;

        /* The CPACR.ZEN controls traps to EL1:
         * 0, 2 : trap EL0 and EL1 accesses
         * 1    : trap only EL0 accesses
         * 3    : trap no accesses
         */
        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            /* route_to_el2: with EL2 present, non-secure, and HCR.TGE
             * set, the trap is taken to EL2 rather than EL1.
             */
            return (arm_feature(env, ARM_FEATURE_EL2)
                    && !arm_is_secure(env)
                    && (env->cp15.hcr_el2 & HCR_TGE) ? 2 : 1);
        }

        /* Check CPACR.FPEN.  If FP accesses are disabled, return 0 so
         * the trap is reported via AdvSIMDFPAccessTrap instead.
         */
        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
            disabled = true;
        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
            disabled = el == 0;
        }
        if (disabled) {
            return 0;
        }
    }

    /* CPTR_EL2.  Since TZ and TFP are positive,
     * they will be zero when EL2 is not present.
     */
    if (el <= 2 && !arm_is_secure_below_el3(env)) {
        if (env->cp15.cptr_el[2] & CPTR_TZ) {
            return 2;
        }
        if (env->cp15.cptr_el[2] & CPTR_TFP) {
            /* FP trap takes precedence; let fp_exception_el handle it. */
            return 0;
        }
    }

    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
    if (arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
        return 3;
    }
#endif
    return 0;
}
4541
4542/*
4543 * Given that SVE is enabled, return the vector length for EL.
4544 */
4545uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
4546{
4547    ARMCPU *cpu = arm_env_get_cpu(env);
4548    uint32_t zcr_len = cpu->sve_max_vq - 1;
4549
4550    if (el <= 1) {
4551        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
4552    }
4553    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
4554        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
4555    }
4556    if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
4557        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
4558    }
4559    return zcr_len;
4560}
4561
4562static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4563                      uint64_t value)
4564{
4565    int cur_el = arm_current_el(env);
4566    int old_len = sve_zcr_len_for_el(env, cur_el);
4567    int new_len;
4568
4569    /* Bits other than [3:0] are RAZ/WI.  */
4570    raw_write(env, ri, value & 0xf);
4571
4572    /*
4573     * Because we arrived here, we know both FP and SVE are enabled;
4574     * otherwise we would have trapped access to the ZCR_ELn register.
4575     */
4576    new_len = sve_zcr_len_for_el(env, cur_el);
4577    if (new_len < old_len) {
4578        aarch64_sve_narrow_vq(env, new_len + 1);
4579    }
4580}
4581
/* ZCR_EL1: SVE vector length control at EL1.  Writes go through
 * zcr_write so the SVE state can be narrowed when the length shrinks.
 */
static const ARMCPRegInfo zcr_el1_reginfo = {
    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL1_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
4589
/* ZCR_EL2: SVE vector length control at EL2 (registered when EL2 is
 * present; see zcr_no_el2_reginfo for the EL2-absent variant).
 */
static const ARMCPRegInfo zcr_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
4597
/* ZCR_EL2 stand-in: same encoding as above, but reads as zero and
 * ignores writes (arm_cp_read_zero / arm_cp_write_ignore), with no
 * backing state field.
 */
static const ARMCPRegInfo zcr_no_el2_reginfo = {
    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL2_RW, .type = ARM_CP_SVE,
    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
};
4604
/* ZCR_EL3: SVE vector length control at EL3; shares zcr_write with the
 * lower-EL variants.
 */
static const ARMCPRegInfo zcr_el3_reginfo = {
    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
    .access = PL3_RW, .type = ARM_CP_SVE,
    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
    .writefn = zcr_write, .raw_writefn = raw_write
};
4612
4613void hw_watchpoint_update(ARMCPU *cpu, int n)
4614{
4615    CPUARMState *env = &cpu->env;
4616    vaddr len = 0;
4617    vaddr wvr = env->cp15.dbgwvr[n];
4618    uint64_t wcr = env->cp15.dbgwcr[n];
4619    int mask;
4620    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
4621
4622    if (env->cpu_watchpoint[n]) {
4623        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
4624        env->cpu_watchpoint[n] = NULL;
4625    }
4626
4627    if (!extract64(wcr, 0, 1)) {
4628        /* E bit clear : watchpoint disabled */
4629        return;
4630    }
4631
4632    switch (extract64(wcr, 3, 2)) {
4633    case 0:
4634        /* LSC 00 is reserved and must behave as if the wp is disabled */
4635        return;
4636    case 1:
4637        flags |= BP_MEM_READ;
4638        break;
4639    case 2:
4640        flags |= BP_MEM_WRITE;
4641        break;
4642    case 3:
4643        flags |= BP_MEM_ACCESS;
4644        break;
4645    }
4646
4647    /* Attempts to use both MASK and BAS fields simultaneously are
4648     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
4649     * thus generating a watchpoint for every byte in the masked region.
4650     */
4651    mask = extract64(wcr, 24, 4);
4652    if (mask == 1 || mask == 2) {
4653        /* Reserved values of MASK; we must act as if the mask value was
4654         * some non-reserved value, or as if the watchpoint were disabled.
4655         * We choose the latter.
4656         */
4657        return;
4658    } else if (mask) {
4659        /* Watchpoint covers an aligned area up to 2GB in size */
4660        len = 1ULL << mask;
4661        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
4662         * whether the watchpoint fires when the unmasked bits match; we opt
4663         * to generate the exceptions.
4664         */
4665        wvr &= ~(len - 1);
4666    } else {
4667        /* Watchpoint covers bytes defined by the byte address select bits */
4668        int bas = extract64(wcr, 5, 8);
4669        int basstart;
4670
4671        if (bas == 0) {
4672            /* This must act as if the watchpoint is disabled */
4673            return;
4674        }
4675
4676        if (extract64(wvr, 2, 1)) {
4677            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
4678             * ignored, and BAS[3:0] define which bytes to watch.
4679             */
4680            bas &= 0xf;
4681        }
4682        /* The BAS bits are supposed to be programmed to indicate a contiguous
4683         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
4684         * we fire for each byte in the word/doubleword addressed by the WVR.
4685         * We choose to ignore any non-zero bits after the first range of 1s.
4686         */
4687        basstart = ctz32(bas);
4688        len = cto32(bas >> basstart);
4689        wvr += basstart;
4690    }
4691
4692    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
4693                          &env->cpu_watchpoint[n]);
4694}
4695
4696void hw_watchpoint_update_all(ARMCPU *cpu)
4697{
4698    int i;
4699    CPUARMState *env = &cpu->env;
4700
4701    /* Completely clear out existing QEMU watchpoints and our array, to
4702     * avoid possible stale entries following migration load.
4703     */
4704    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
4705    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
4706
4707    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
4708        hw_watchpoint_update(cpu, i);
4709    }
4710}
4711
4712static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4713                         uint64_t value)
4714{
4715    ARMCPU *cpu = arm_env_get_cpu(env);
4716    int i = ri->crm;
4717
4718    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
4719     * register reads and behaves as if values written are sign extended.
4720     * Bits [1:0] are RES0.
4721     */
4722    value = sextract64(value, 0, 49) & ~3ULL;
4723
4724    raw_write(env, ri, value);
4725    hw_watchpoint_update(cpu, i);
4726}
4727
4728static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4729                         uint64_t value)
4730{
4731    ARMCPU *cpu = arm_env_get_cpu(env);
4732    int i = ri->crm;
4733
4734    raw_write(env, ri, value);
4735    hw_watchpoint_update(cpu, i);
4736}
4737
4738void hw_breakpoint_update(ARMCPU *cpu, int n)
4739{
4740    CPUARMState *env = &cpu->env;
4741    uint64_t bvr = env->cp15.dbgbvr[n];
4742    uint64_t bcr = env->cp15.dbgbcr[n];
4743    vaddr addr;
4744    int bt;
4745    int flags = BP_CPU;
4746
4747    if (env->cpu_breakpoint[n]) {
4748        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
4749        env->cpu_breakpoint[n] = NULL;
4750    }
4751
4752    if (!extract64(bcr, 0, 1)) {
4753        /* E bit clear : watchpoint disabled */
4754        return;
4755    }
4756
4757    bt = extract64(bcr, 20, 4);
4758
4759    switch (bt) {
4760    case 4: /* unlinked address mismatch (reserved if AArch64) */
4761    case 5: /* linked address mismatch (reserved if AArch64) */
4762        qemu_log_mask(LOG_UNIMP,
4763                      "arm: address mismatch breakpoint types not implemented\n");
4764        return;
4765    case 0: /* unlinked address match */
4766    case 1: /* linked address match */
4767    {
4768        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
4769         * we behave as if the register was sign extended. Bits [1:0] are
4770         * RES0. The BAS field is used to allow setting breakpoints on 16
4771         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
4772         * a bp will fire if the addresses covered by the bp and the addresses
4773         * covered by the insn overlap but the insn doesn't start at the
4774         * start of the bp address range. We choose to require the insn and
4775         * the bp to have the same address. The constraints on writing to
4776         * BAS enforced in dbgbcr_write mean we have only four cases:
4777         *  0b0000  => no breakpoint
4778         *  0b0011  => breakpoint on addr
4779         *  0b1100  => breakpoint on addr + 2
4780         *  0b1111  => breakpoint on addr
4781         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
4782         */
4783        int bas = extract64(bcr, 5, 4);
4784        addr = sextract64(bvr, 0, 49) & ~3ULL;
4785        if (bas == 0) {
4786            return;
4787        }
4788        if (bas == 0xc) {
4789            addr += 2;
4790        }
4791        break;
4792    }
4793    case 2: /* unlinked context ID match */
4794    case 8: /* unlinked VMID match (reserved if no EL2) */
4795    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
4796        qemu_log_mask(LOG_UNIMP,
4797                      "arm: unlinked context breakpoint types not implemented\n");
4798        return;
4799    case 9: /* linked VMID match (reserved if no EL2) */
4800    case 11: /* linked context ID and VMID match (reserved if no EL2) */
4801    case 3: /* linked context ID match */
4802    default:
4803        /* We must generate no events for Linked context matches (unless
4804         * they are linked to by some other bp/wp, which is handled in
4805         * updates for the linking bp/wp). We choose to also generate no events
4806         * for reserved values.
4807         */
4808        return;
4809    }