qemu/target-arm/helper.c
   1#include "qemu/osdep.h"
   2#include "cpu.h"
   3#include "internals.h"
   4#include "exec/gdbstub.h"
   5#include "exec/helper-proto.h"
   6#include "qemu/host-utils.h"
   7#include "sysemu/arch_init.h"
   8#include "sysemu/sysemu.h"
   9#include "qemu/bitops.h"
  10#include "qemu/crc32c.h"
  11#include "exec/cpu_ldst.h"
  12#include "arm_ldst.h"
  13#include <zlib.h> /* For crc32 */
  14#include "exec/semihost.h"
  15#include "sysemu/kvm.h"
  16
  17#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
  18
  19#ifndef CONFIG_USER_ONLY
  20static bool get_phys_addr(CPUARMState *env, target_ulong address,
  21                          int access_type, ARMMMUIdx mmu_idx,
  22                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
  23                          target_ulong *page_size, uint32_t *fsr,
  24                          ARMMMUFaultInfo *fi);
  25
  26static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
  27                               int access_type, ARMMMUIdx mmu_idx,
  28                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
  29                               target_ulong *page_size_ptr, uint32_t *fsr,
  30                               ARMMMUFaultInfo *fi);
  31
  32/* Definitions for the PMCCNTR and PMCR registers */
  33#define PMCRD   0x8
  34#define PMCRC   0x4
  35#define PMCRE   0x1
  36#endif
  37
  38static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
  39{
  40    int nregs;
  41
  42    /* VFP data registers are always little-endian.  */
  43    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
  44    if (reg < nregs) {
  45        stfq_le_p(buf, env->vfp.regs[reg]);
  46        return 8;
  47    }
  48    if (arm_feature(env, ARM_FEATURE_NEON)) {
  49        /* Aliases for Q regs.  */
  50        nregs += 16;
  51        if (reg < nregs) {
  52            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
  53            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
  54            return 16;
  55        }
  56    }
  57    switch (reg - nregs) {
  58    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
  59    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
  60    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
  61    }
  62    return 0;
  63}
  64
  65static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
  66{
  67    int nregs;
  68
  69    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
  70    if (reg < nregs) {
  71        env->vfp.regs[reg] = ldfq_le_p(buf);
  72        return 8;
  73    }
  74    if (arm_feature(env, ARM_FEATURE_NEON)) {
  75        nregs += 16;
  76        if (reg < nregs) {
  77            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
  78            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
  79            return 16;
  80        }
  81    }
  82    switch (reg - nregs) {
  83    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
  84    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
  85    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
  86    }
  87    return 0;
  88}
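/* The GDB register numbering above follows from the feature checks: on a
 * VFP3 core, registers 0..31 are D0..D31 (8 bytes each); with NEON,
 * registers 32..47 alias Q0..Q15 (16 bytes, i.e. two D registers each);
 * the last three registers are FPSID, FPSCR and FPEXC (4 bytes each).  A
 * core with only 16 D registers exposes D0..D15 followed directly by the
 * three control registers.
 */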
  89
  90static int arm_sys_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
  91{
  92    switch (reg) {
  93    case 0:
  94        /* TTBCR Secure */
  95        stl_p(buf, env->cp15.tcr_el[3].raw_tcr);
  96        return 4;
  97    case 1:
  98        /* TTBR0 Secure */
  99        stl_p(buf, env->cp15.ttbr0_s);
 100        return 4;
 101    case 2:
 102        /* TTBR1 Secure */
 103        stl_p(buf, env->cp15.ttbr1_s);
 104        return 4;
 105    default:
 106        return 0;
 107    }
 108}
 109
 110static int arm_sys_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
 111{
 112    switch (reg) {
 113    case 0:
 114        /* TTBCR Secure */
 115        return 0;
 116    case 1:
 117        /* TTBR0 Secure */
 118        env->cp15.ttbr0_s = ldl_p(buf);
 119        return 4;
 120    case 2:
 121        /* TTBR1 Secure */
 122        env->cp15.ttbr1_s = ldl_p(buf);
 123        return 4;
 124    default:
 125        return 0;
 126    }
 127}
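/* These two handlers presumably back a small GDB "system registers" set
 * registered elsewhere via gdb_register_coprocessor() (exec/gdbstub.h).
 * The Secure TTBCR is exposed read-only here (its set handler consumes
 * nothing), while the Secure TTBR0/TTBR1 accept 32-bit writes straight
 * into the banked cp15 fields.
 */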
 128
 129static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
 130{
 131    switch (reg) {
 132    case 0 ... 31:
 133        /* 128 bit FP register */
 134        stfq_le_p(buf, env->vfp.regs[reg * 2]);
 135        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
 136        return 16;
 137    case 32:
 138        /* FPSR */
 139        stl_p(buf, vfp_get_fpsr(env));
 140        return 4;
 141    case 33:
 142        /* FPCR */
 143        stl_p(buf, vfp_get_fpcr(env));
 144        return 4;
 145    default:
 146        return 0;
 147    }
 148}
 149
 150static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
 151{
 152    switch (reg) {
 153    case 0 ... 31:
 154        /* 128 bit FP register */
 155        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
 156        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
 157        return 16;
 158    case 32:
 159        /* FPSR */
 160        vfp_set_fpsr(env, ldl_p(buf));
 161        return 4;
 162    case 33:
 163        /* FPCR */
 164        vfp_set_fpcr(env, ldl_p(buf));
 165        return 4;
 166    default:
 167        return 0;
 168    }
 169}
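/* Each AArch64 V register is 128 bits wide and is stored as two consecutive
 * 64-bit elements of env->vfp.regs[]: V<n> occupies regs[2n] (low half) and
 * regs[2n + 1] (high half).  For example, V3 lives in regs[6] and regs[7].
 */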
 170
 171static int aarch64_elx_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg, int el)
 172{
 173    switch (reg) {
 174    case 0:
 175        stfq_le_p(buf, env->elr_el[el]);
 176        return 8;
 177    case 1:
 178        stfq_le_p(buf, env->cp15.esr_el[el]);
 179        return 8;
 180    case 2:
 181        stfq_le_p(buf, env->banked_spsr[aarch64_banked_spsr_index(el)]);
 182        return 8;
 183    case 3:
 184        stfq_le_p(buf, env->cp15.ttbr0_el[el]);
 185        return 8;
 186    case 4:
 187        if (el == 1) {
 188            stfq_le_p(buf, env->cp15.ttbr1_el[el]);
 189            return 8;
 190        }
 191        /* Fallthrough */
 192    default:
 193        return 0;
 194    }
 195}
 196
 197static int aarch64_elx_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg, int el)
 198{
 199    switch (reg) {
 200    case 0:
 201        env->elr_el[el] = ldfq_le_p(buf);
 202        return 8;
 203    case 1:
 204        env->cp15.esr_el[el] = ldfq_le_p(buf);
 205        return 8;
 206    case 2:
 207        env->banked_spsr[aarch64_banked_spsr_index(el)] = ldfq_le_p(buf);
 208        return 8;
 209    case 3:
 210        env->cp15.ttbr0_el[el] = ldfq_le_p(buf);
 211        return 8;
 212    case 4:
 213        if (el == 1) {
 214            env->cp15.ttbr1_el[el] = ldfq_le_p(buf);
 215            return 8;
 216        }
 217        /* Fallthrough */
 218    default:
 219        return 0;
 220    }
 221}
 222
 223static int aarch64_el1_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
 224{
 225    return aarch64_elx_gdb_get_reg(env, buf, reg, 1);
 226}
 227
 228static int aarch64_el1_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
 229{
 230    return aarch64_elx_gdb_set_reg(env, buf, reg, 1);
 231}
 232
 233static int aarch64_el2_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
 234{
 235    return aarch64_elx_gdb_get_reg(env, buf, reg, 2);
 236}
 237
 238static int aarch64_el2_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
 239{
 240    return aarch64_elx_gdb_set_reg(env, buf, reg, 2);
 241}
 242
 243static int aarch64_el3_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
 244{
 245    return aarch64_elx_gdb_get_reg(env, buf, reg, 3);
 246}
 247
 248static int aarch64_el3_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
 249{
 250    return aarch64_elx_gdb_set_reg(env, buf, reg, 3);
 251}
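/* The per-EL wrappers above all expose the same register layout: ELR_ELx,
 * ESR_ELx, SPSR_ELx and TTBR0_ELx, plus TTBR1_EL1 for EL1 only.  Register
 * numbers outside that range return 0 (no bytes transferred).
 */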
 252
 253static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
 254{
 255    assert(ri->fieldoffset);
 256    if (cpreg_field_is_64bit(ri)) {
 257        return CPREG_FIELD64(env, ri);
 258    } else {
 259        return CPREG_FIELD32(env, ri);
 260    }
 261}
 262
 263static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
 264                      uint64_t value)
 265{
 266    assert(ri->fieldoffset);
 267    if (cpreg_field_is_64bit(ri)) {
 268        CPREG_FIELD64(env, ri) = value;
 269    } else {
 270        CPREG_FIELD32(env, ri) = value;
 271    }
 272}
 273
 274static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
 275{
 276    return (char *)env + ri->fieldoffset;
 277}
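/* raw_read()/raw_write() are the default "no side effects" accessors: they
 * simply load or store the CPUARMState field named by ri->fieldoffset,
 * using the 64-bit or 32-bit view as appropriate for the regdef.  For
 * example, a regdef with .fieldoffset = offsetof(CPUARMState,
 * cp15.mair_el[1]) reads and writes that field directly.
 */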
 278
 279uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
 280{
 281    /* Raw read of a coprocessor register (as needed for migration, etc). */
 282    if (ri->type & ARM_CP_CONST) {
 283        return ri->resetvalue;
 284    } else if (ri->raw_readfn) {
 285        return ri->raw_readfn(env, ri);
 286    } else if (ri->readfn) {
 287        return ri->readfn(env, ri);
 288    } else {
 289        return raw_read(env, ri);
 290    }
 291}
 292
 293static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
 294                             uint64_t v)
 295{
 296    /* Raw write of a coprocessor register (as needed for migration, etc).
 297     * Note that constant registers are treated as write-ignored; the
 298     * caller should check for success by whether a readback gives the
 299     * value written.
 300     */
 301    if (ri->type & ARM_CP_CONST) {
 302        return;
 303    } else if (ri->raw_writefn) {
 304        ri->raw_writefn(env, ri, v);
 305    } else if (ri->writefn) {
 306        ri->writefn(env, ri, v);
 307    } else {
 308        raw_write(env, ri, v);
 309    }
 310}
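/* A minimal sketch of how migration-style code drives these helpers (the
 * regidx variable is hypothetical; the calls mirror what the list-sync
 * functions below actually do):
 *
 *     const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 *     uint64_t v = read_raw_cp_reg(&cpu->env, ri);
 *     write_raw_cp_reg(&cpu->env, ri, v);
 *     if (read_raw_cp_reg(&cpu->env, ri) != v) {
 *         // the write was ignored, e.g. an ARM_CP_CONST register
 *     }
 */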
 311
 312static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
 313{
 314   /* Return true if the regdef would cause an assertion if you called
 315    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
 316    * program bug for it not to have the NO_RAW flag).
 317    * NB that returning false here doesn't necessarily mean that calling
 318    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
 319    * read/write access functions which are safe for raw use" from "has
 320    * read/write access functions which have side effects but has forgotten
 321    * to provide raw access functions".
 322    * The tests here line up with the conditions in read/write_raw_cp_reg()
 323    * and assertions in raw_read()/raw_write().
 324    */
 325    if ((ri->type & ARM_CP_CONST) ||
 326        ri->fieldoffset ||
 327        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
 328        return false;
 329    }
 330    return true;
 331}
 332
 333bool write_cpustate_to_list(ARMCPU *cpu)
 334{
 335    /* Write the coprocessor state from cpu->env to the (index,value) list. */
 336    int i;
 337    bool ok = true;
 338
 339    for (i = 0; i < cpu->cpreg_array_len; i++) {
 340        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
 341        const ARMCPRegInfo *ri;
 342
 343        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 344        if (!ri) {
 345            ok = false;
 346            continue;
 347        }
 348        if (ri->type & ARM_CP_NO_RAW) {
 349            continue;
 350        }
 351        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
 352    }
 353    return ok;
 354}
 355
 356bool write_list_to_cpustate(ARMCPU *cpu)
 357{
 358    int i;
 359    bool ok = true;
 360
 361    for (i = 0; i < cpu->cpreg_array_len; i++) {
 362        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
 363        uint64_t v = cpu->cpreg_values[i];
 364        const ARMCPRegInfo *ri;
 365
 366        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 367        if (!ri) {
 368            ok = false;
 369            continue;
 370        }
 371        if (ri->type & ARM_CP_NO_RAW) {
 372            continue;
 373        }
 374        /* Write value and confirm it reads back as written
 375         * (to catch read-only registers and partially read-only
 376         * registers where the incoming migration value doesn't match)
 377         */
 378        write_raw_cp_reg(&cpu->env, ri, v);
 379        if (read_raw_cp_reg(&cpu->env, ri) != v) {
 380            ok = false;
 381        }
 382    }
 383    return ok;
 384}
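/* Together these two functions keep the flat (index, value) list in
 * cpu->cpreg_indexes[]/cpreg_values[] (used for migration and for syncing
 * with KVM) in step with the live cp15 state: write_cpustate_to_list()
 * snapshots the registers, while write_list_to_cpustate() restores them and
 * relies on the readback check to spot values the CPU will not accept.
 */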
 385
 386static void add_cpreg_to_list(gpointer key, gpointer opaque)
 387{
 388    ARMCPU *cpu = opaque;
 389    uint64_t regidx;
 390    const ARMCPRegInfo *ri;
 391
 392    regidx = *(uint32_t *)key;
 393    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 394
 395    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
 396        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
 397        /* The value array need not be initialized at this point */
 398        cpu->cpreg_array_len++;
 399    }
 400}
 401
 402static void count_cpreg(gpointer key, gpointer opaque)
 403{
 404    ARMCPU *cpu = opaque;
 405    uint64_t regidx;
 406    const ARMCPRegInfo *ri;
 407
 408    regidx = *(uint32_t *)key;
 409    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 410
 411    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
 412        cpu->cpreg_array_len++;
 413    }
 414}
 415
 416static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
 417{
 418    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
 419    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
 420
 421    if (aidx > bidx) {
 422        return 1;
 423    }
 424    if (aidx < bidx) {
 425        return -1;
 426    }
 427    return 0;
 428}
 429
 430void init_cpreg_list(ARMCPU *cpu)
 431{
 432    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
 433     * Note that we require cpreg_tuples[] to be sorted by key ID.
 434     */
 435    GList *keys;
 436    int arraylen;
 437
 438    keys = g_hash_table_get_keys(cpu->cp_regs);
 439    keys = g_list_sort(keys, cpreg_key_compare);
 440
 441    cpu->cpreg_array_len = 0;
 442
 443    g_list_foreach(keys, count_cpreg, cpu);
 444
 445    arraylen = cpu->cpreg_array_len;
 446    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
 447    cpu->cpreg_values = g_new(uint64_t, arraylen);
 448    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
 449    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
 450    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
 451    cpu->cpreg_array_len = 0;
 452
 453    g_list_foreach(keys, add_cpreg_to_list, cpu);
 454
 455    assert(cpu->cpreg_array_len == arraylen);
 456
 457    g_list_free(keys);
 458}
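/* Note the two-pass pattern: count_cpreg() sizes the arrays, then
 * add_cpreg_to_list() fills them in from the same sorted key list, and the
 * final assert checks that the two passes agreed.  Registers marked
 * ARM_CP_NO_RAW or ARM_CP_ALIAS are left out of the list entirely.
 */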
 459
 460/*
 461 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 462 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 463 *
 464 * access_el3_aa32ns: Used to check AArch32 register views.
 465 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 466 */
 467static CPAccessResult access_el3_aa32ns(CPUARMState *env,
 468                                        const ARMCPRegInfo *ri,
 469                                        bool isread)
 470{
 471    bool secure = arm_is_secure_below_el3(env);
 472
 473    assert(!arm_el_is_aa64(env, 3));
 474    if (secure) {
 475        return CP_ACCESS_TRAP_UNCATEGORIZED;
 476    }
 477    return CP_ACCESS_OK;
 478}
 479
 480static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
 481                                                const ARMCPRegInfo *ri,
 482                                                bool isread)
 483{
 484    if (!arm_el_is_aa64(env, 3)) {
 485        return access_el3_aa32ns(env, ri, isread);
 486    }
 487    return CP_ACCESS_OK;
 488}
 489
 490/* Some secure-only AArch32 registers trap to EL3 if used from
 491 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 492 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 493 * We assume that the .access field is set to PL1_RW.
 494 */
 495static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
 496                                            const ARMCPRegInfo *ri,
 497                                            bool isread)
 498{
 499    if (arm_current_el(env) == 3) {
 500        return CP_ACCESS_OK;
 501    }
 502    if (arm_is_secure_below_el3(env)) {
 503        return CP_ACCESS_TRAP_EL3;
 504    }
 505    /* This will be EL1 NS and EL2 NS, which just UNDEF */
 506    return CP_ACCESS_TRAP_UNCATEGORIZED;
 507}
 508
 509/* Check for traps to "powerdown debug" registers, which are controlled
 510 * by MDCR.TDOSA
 511 */
 512static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
 513                                   bool isread)
 514{
 515    int el = arm_current_el(env);
 516
 517    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
 518        && !arm_is_secure_below_el3(env)) {
 519        return CP_ACCESS_TRAP_EL2;
 520    }
 521    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
 522        return CP_ACCESS_TRAP_EL3;
 523    }
 524    return CP_ACCESS_OK;
 525}
 526
 527/* Check for traps to "debug ROM" registers, which are controlled
 528 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 529 */
 530static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
 531                                  bool isread)
 532{
 533    int el = arm_current_el(env);
 534
 535    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
 536        && !arm_is_secure_below_el3(env)) {
 537        return CP_ACCESS_TRAP_EL2;
 538    }
 539    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
 540        return CP_ACCESS_TRAP_EL3;
 541    }
 542    return CP_ACCESS_OK;
 543}
 544
 545/* Check for traps to general debug registers, which are controlled
 546 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 547 */
 548static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
 549                                  bool isread)
 550{
 551    int el = arm_current_el(env);
 552
 553    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
 554        && !arm_is_secure_below_el3(env)) {
 555        return CP_ACCESS_TRAP_EL2;
 556    }
 557    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
 558        return CP_ACCESS_TRAP_EL3;
 559    }
 560    return CP_ACCESS_OK;
 561}
 562
 563/* Check for traps to performance monitor registers, which are controlled
 564 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 565 */
 566static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
 567                                 bool isread)
 568{
 569    int el = arm_current_el(env);
 570
 571    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
 572        && !arm_is_secure_below_el3(env)) {
 573        return CP_ACCESS_TRAP_EL2;
 574    }
 575    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
 576        return CP_ACCESS_TRAP_EL3;
 577    }
 578    return CP_ACCESS_OK;
 579}
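/* access_tdosa(), access_tdra(), access_tda() and access_tpm() all follow
 * the same pattern: a Non-secure access from below EL2 traps to EL2 if the
 * relevant MDCR_EL2 bit is set; otherwise an access from below EL3 traps to
 * EL3 if the MDCR_EL3 bit is set; anything else is allowed.
 */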
 580
 581static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 582{
 583    ARMCPU *cpu = arm_env_get_cpu(env);
 584
 585    raw_write(env, ri, value);
 586    tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
 587}
 588
 589static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 590{
 591    ARMCPU *cpu = arm_env_get_cpu(env);
 592
 593    if (raw_read(env, ri) != value) {
  594        /* Unlike real hardware, the QEMU TLB uses virtual addresses,
 595         * not modified virtual addresses, so this causes a TLB flush.
 596         */
 597        tlb_flush(CPU(cpu), 1);
 598        raw_write(env, ri, value);
 599    }
 600}
 601
 602static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 603                             uint64_t value)
 604{
 605    ARMCPU *cpu = arm_env_get_cpu(env);
 606
 607    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
 608        && !extended_addresses_enabled(env)) {
 609        /* For VMSA (when not using the LPAE long descriptor page table
 610         * format) this register includes the ASID, so do a TLB flush.
 611         * For PMSA it is purely a process ID and no action is needed.
 612         */
 613        tlb_flush(CPU(cpu), 1);
 614    }
 615    raw_write(env, ri, value);
 616}
 617
 618static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
 619                          uint64_t value)
 620{
 621    /* Invalidate all (TLBIALL) */
 622    ARMCPU *cpu = arm_env_get_cpu(env);
 623
 624    tlb_flush(CPU(cpu), 1);
 625}
 626
 627static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
 628                          uint64_t value)
 629{
 630    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
 631    ARMCPU *cpu = arm_env_get_cpu(env);
 632
 633    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
 634}
 635
 636static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
 637                           uint64_t value)
 638{
 639    /* Invalidate by ASID (TLBIASID) */
 640    ARMCPU *cpu = arm_env_get_cpu(env);
 641
 642    tlb_flush(CPU(cpu), value == 0);
 643}
 644
 645static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
 646                           uint64_t value)
 647{
 648    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
 649    ARMCPU *cpu = arm_env_get_cpu(env);
 650
 651    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
 652}
 653
 654/* IS variants of TLB operations must affect all cores */
 655static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 656                             uint64_t value)
 657{
 658    CPUState *other_cs;
 659
 660    CPU_FOREACH(other_cs) {
 661        tlb_flush(other_cs, 1);
 662    }
 663}
 664
 665static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 666                             uint64_t value)
 667{
 668    CPUState *other_cs;
 669
 670    CPU_FOREACH(other_cs) {
 671        tlb_flush(other_cs, value == 0);
 672    }
 673}
 674
 675static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 676                             uint64_t value)
 677{
 678    CPUState *other_cs;
 679
 680    CPU_FOREACH(other_cs) {
 681        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
 682    }
 683}
 684
 685static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 686                             uint64_t value)
 687{
 688    CPUState *other_cs;
 689
 690    CPU_FOREACH(other_cs) {
 691        tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
 692    }
 693}
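/* For the MVA-based operations the written value carries the address in its
 * upper bits (hence the TARGET_PAGE_MASK masking) and, for TLBIMVA, an ASID
 * in the low byte which QEMU's TLB does not track separately.  The *_is_*
 * ("Inner Shareable") variants apply the same operation to every CPU.
 */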
 694
 695static const ARMCPRegInfo cp_reginfo[] = {
 696    /* Define the secure and non-secure FCSE identifier CP registers
 697     * separately because there is no secure bank in V8 (no _EL3).  This allows
 698     * the secure register to be properly reset and migrated. There is also no
 699     * v8 EL1 version of the register so the non-secure instance stands alone.
 700     */
 701    { .name = "FCSEIDR(NS)",
 702      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
 703      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
 704      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
 705      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
 706    { .name = "FCSEIDR(S)",
 707      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
 708      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
 709      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
 710      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
 711    /* Define the secure and non-secure context identifier CP registers
 712     * separately because there is no secure bank in V8 (no _EL3).  This allows
 713     * the secure register to be properly reset and migrated.  In the
 714     * non-secure case, the 32-bit register will have reset and migration
 715     * disabled during registration as it is handled by the 64-bit instance.
 716     */
 717    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
 718      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
 719      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
 720      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
 721      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
 722    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
 723      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
 724      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
 725      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
 726      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
 727    REGINFO_SENTINEL
 728};
 729
 730static const ARMCPRegInfo not_v8_cp_reginfo[] = {
 731    /* NB: Some of these registers exist in v8 but with more precise
 732     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
 733     */
 734    /* MMU Domain access control / MPU write buffer control */
 735    { .name = "DACR",
 736      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
 737      .access = PL1_RW, .resetvalue = 0,
 738      .writefn = dacr_write, .raw_writefn = raw_write,
 739      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
 740                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
 741    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
 742     * For v6 and v5, these mappings are overly broad.
 743     */
 744    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
 745      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 746    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
 747      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 748    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
 749      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 750    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
 751      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 752    /* Cache maintenance ops; some of this space may be overridden later. */
 753    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
 754      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
 755      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
 756    REGINFO_SENTINEL
 757};
 758
 759static const ARMCPRegInfo not_v6_cp_reginfo[] = {
 760    /* Not all pre-v6 cores implemented this WFI, so this is slightly
 761     * over-broad.
 762     */
 763    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
 764      .access = PL1_W, .type = ARM_CP_WFI },
 765    REGINFO_SENTINEL
 766};
 767
 768static const ARMCPRegInfo not_v7_cp_reginfo[] = {
 769    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
 770     * is UNPREDICTABLE; we choose to NOP as most implementations do).
 771     */
 772    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
 773      .access = PL1_W, .type = ARM_CP_WFI },
 774    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
 775     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
 776     * OMAPCP will override this space.
 777     */
 778    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
 779      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
 780      .resetvalue = 0 },
 781    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
 782      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
 783      .resetvalue = 0 },
 784    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
 785    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
 786      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
 787      .resetvalue = 0 },
 788    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
 789     * implementing it as RAZ means the "debug architecture version" bits
 790     * will read as a reserved value, which should cause Linux to not try
 791     * to use the debug hardware.
 792     */
 793    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
 794      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
 795    /* MMU TLB control. Note that the wildcarding means we cover not just
 796     * the unified TLB ops but also the dside/iside/inner-shareable variants.
 797     */
 798    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
 799      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
 800      .type = ARM_CP_NO_RAW },
 801    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
 802      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
 803      .type = ARM_CP_NO_RAW },
 804    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
 805      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
 806      .type = ARM_CP_NO_RAW },
 807    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
 808      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
 809      .type = ARM_CP_NO_RAW },
 810    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
 811      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
 812    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
 813      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
 814    REGINFO_SENTINEL
 815};
 816
 817static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 818                        uint64_t value)
 819{
 820    uint32_t mask = 0;
 821
 822    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
 823    if (!arm_feature(env, ARM_FEATURE_V8)) {
 824        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
 825         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
 826         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
 827         */
 828        if (arm_feature(env, ARM_FEATURE_VFP)) {
 829            /* VFP coprocessor: cp10 & cp11 [23:20] */
 830            mask |= (1 << 31) | (1 << 30) | (0xf << 20);
 831
 832            if (!arm_feature(env, ARM_FEATURE_NEON)) {
 833                /* ASEDIS [31] bit is RAO/WI */
 834                value |= (1 << 31);
 835            }
 836
 837            /* VFPv3 and upwards with NEON implement 32 double precision
 838             * registers (D0-D31).
 839             */
 840            if (!arm_feature(env, ARM_FEATURE_NEON) ||
 841                    !arm_feature(env, ARM_FEATURE_VFP3)) {
 842                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
 843                value |= (1 << 30);
 844            }
 845        }
 846        value &= mask;
 847    }
 848    env->cp15.cpacr_el1 = value;
 849}
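/* Worked example for the v7 path above: on a core with VFP but no NEON and
 * no VFP3, the mask covers bits [31:30] and [23:20]; ASEDIS and D32DIS then
 * behave as RAO/WI, so a guest write of 0x00f00000 (full cp10/cp11 access)
 * is stored as 0xc0f00000.  On a v8 CPU the value is written back unmasked.
 */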
 850
 851static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
 852                                   bool isread)
 853{
 854    if (arm_feature(env, ARM_FEATURE_V8)) {
 855        /* Check if CPACR accesses are to be trapped to EL2 */
 856        if (arm_current_el(env) == 1 &&
 857            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
 858            return CP_ACCESS_TRAP_EL2;
 859        /* Check if CPACR accesses are to be trapped to EL3 */
 860        } else if (arm_current_el(env) < 3 &&
 861                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
 862            return CP_ACCESS_TRAP_EL3;
 863        }
 864    }
 865
 866    return CP_ACCESS_OK;
 867}
 868
 869static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
 870                                  bool isread)
 871{
 872    /* Check if CPTR accesses are set to trap to EL3 */
 873    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
 874        return CP_ACCESS_TRAP_EL3;
 875    }
 876
 877    return CP_ACCESS_OK;
 878}
 879
 880static const ARMCPRegInfo v6_cp_reginfo[] = {
 881    /* prefetch by MVA in v6, NOP in v7 */
 882    { .name = "MVA_prefetch",
 883      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
 884      .access = PL1_W, .type = ARM_CP_NOP },
 885    /* We need to break the TB after ISB to execute self-modifying code
 886     * correctly and also to take any pending interrupts immediately.
  887     * So use the arm_cp_write_ignore() function instead of the ARM_CP_NOP flag.
 888     */
 889    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
 890      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
 891    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
 892      .access = PL0_W, .type = ARM_CP_NOP },
 893    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
 894      .access = PL0_W, .type = ARM_CP_NOP },
 895    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
 896      .access = PL1_RW,
 897      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
 898                             offsetof(CPUARMState, cp15.ifar_ns) },
 899      .resetvalue = 0, },
  900    /* Watchpoint Fault Address Register: should actually only be present
 901     * for 1136, 1176, 11MPCore.
 902     */
 903    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
 904      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
 905    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
 906      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
 907      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
 908      .resetvalue = 0, .writefn = cpacr_write },
 909    REGINFO_SENTINEL
 910};
 911
 912static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
 913                                   bool isread)
 914{
  915    /* User accessibility of the performance monitor registers is controlled
 916     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
 917     * trapping to EL2 or EL3 for other accesses.
 918     */
 919    int el = arm_current_el(env);
 920
 921    if (el == 0 && !env->cp15.c9_pmuserenr) {
 922        return CP_ACCESS_TRAP;
 923    }
 924    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
 925        && !arm_is_secure_below_el3(env)) {
 926        return CP_ACCESS_TRAP_EL2;
 927    }
 928    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
 929        return CP_ACCESS_TRAP_EL3;
 930    }
 931
 932    return CP_ACCESS_OK;
 933}
 934
 935#ifndef CONFIG_USER_ONLY
 936
 937static inline bool arm_ccnt_enabled(CPUARMState *env)
 938{
  939    /* This does not support checking the PMCCFILTR_EL0 register */
 940
 941    if (!(env->cp15.c9_pmcr & PMCRE)) {
 942        return false;
 943    }
 944
 945    return true;
 946}
 947
 948void pmccntr_sync(CPUARMState *env)
 949{
 950    uint64_t temp_ticks;
 951
 952    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 953                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
 954
 955    if (env->cp15.c9_pmcr & PMCRD) {
 956        /* Increment once every 64 processor clock cycles */
 957        temp_ticks /= 64;
 958    }
 959
 960    if (arm_ccnt_enabled(env)) {
 961        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
 962    }
 963}
 964
 965static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 966                       uint64_t value)
 967{
 968    pmccntr_sync(env);
 969
 970    if (value & PMCRC) {
 971        /* The counter has been reset */
 972        env->cp15.c15_ccnt = 0;
 973    }
 974
 975    /* only the DP, X, D and E bits are writable */
 976    env->cp15.c9_pmcr &= ~0x39;
 977    env->cp15.c9_pmcr |= (value & 0x39);
 978
 979    pmccntr_sync(env);
 980}
 981
 982static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 983{
 984    uint64_t total_ticks;
 985
 986    if (!arm_ccnt_enabled(env)) {
 987        /* Counter is disabled, do not change value */
 988        return env->cp15.c15_ccnt;
 989    }
 990
 991    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 992                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
 993
 994    if (env->cp15.c9_pmcr & PMCRD) {
 995        /* Increment once every 64 processor clock cycles */
 996        total_ticks /= 64;
 997    }
 998    return total_ticks - env->cp15.c15_ccnt;
 999}
1000
1001static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1002                        uint64_t value)
1003{
1004    uint64_t total_ticks;
1005
1006    if (!arm_ccnt_enabled(env)) {
1007        /* Counter is disabled, set the absolute value */
1008        env->cp15.c15_ccnt = value;
1009        return;
1010    }
1011
1012    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1013                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1014
1015    if (env->cp15.c9_pmcr & PMCRD) {
1016        /* Increment once every 64 processor clock cycles */
1017        total_ticks /= 64;
1018    }
1019    env->cp15.c15_ccnt = total_ticks - value;
1020}
1021
1022static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1023                            uint64_t value)
1024{
1025    uint64_t cur_val = pmccntr_read(env, NULL);
1026
1027    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1028}
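/* The cycle counter is kept in a "difference" form while it is counting:
 * c15_ccnt holds (total_ticks - guest_counter), so pmccntr_read() recovers
 * the guest value as total_ticks - c15_ccnt.  pmccntr_sync() converts
 * between that form and the absolute counter value, which is why writers
 * such as pmcr_write() and pmccfiltr_write() bracket their updates with two
 * pmccntr_sync() calls.
 */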
1029
1030#else /* CONFIG_USER_ONLY */
1031
1032void pmccntr_sync(CPUARMState *env)
1033{
1034}
1035
1036#endif
1037
1038static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1039                            uint64_t value)
1040{
1041    pmccntr_sync(env);
1042    env->cp15.pmccfiltr_el0 = value & 0x7E000000;
1043    pmccntr_sync(env);
1044}
1045
1046static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1047                            uint64_t value)
1048{
1049    value &= (1 << 31);
1050    env->cp15.c9_pmcnten |= value;
1051}
1052
1053static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1054                             uint64_t value)
1055{
1056    value &= (1 << 31);
1057    env->cp15.c9_pmcnten &= ~value;
1058}
1059
1060static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1061                         uint64_t value)
1062{
1063    env->cp15.c9_pmovsr &= ~value;
1064}
1065
1066static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1067                             uint64_t value)
1068{
1069    env->cp15.c9_pmxevtyper = value & 0xff;
1070}
1071
1072static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1073                            uint64_t value)
1074{
1075    env->cp15.c9_pmuserenr = value & 1;
1076}
1077
1078static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1079                             uint64_t value)
1080{
1081    /* We have no event counters so only the C bit can be changed */
1082    value &= (1 << 31);
1083    env->cp15.c9_pminten |= value;
1084}
1085
1086static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1087                             uint64_t value)
1088{
1089    value &= (1 << 31);
1090    env->cp15.c9_pminten &= ~value;
1091}
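/* The PMCNTEN{SET,CLR} and PMINTEN{SET,CLR} pairs follow the usual ARM
 * convention: writing a 1 to a bit in the SET register sets it, and writing
 * a 1 to the same bit in the CLR register clears it.  Since no event
 * counters are implemented, only bit 31 (the cycle counter) is kept.
 */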
1092
1093static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1094                       uint64_t value)
1095{
1096    /* Note that even though the AArch64 view of this register has bits
 1097     * [10:0] all RES0, we only mask the bottom 5, to comply with the
1098     * architectural requirements for bits which are RES0 only in some
1099     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1100     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1101     */
1102    raw_write(env, ri, value & ~0x1FULL);
1103}
1104
1105static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1106{
1107    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
1108     * For bits that vary between AArch32/64, code needs to check the
1109     * current execution mode before directly using the feature bit.
1110     */
1111    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;
1112
1113    if (!arm_feature(env, ARM_FEATURE_EL2)) {
1114        valid_mask &= ~SCR_HCE;
1115
1116        /* On ARMv7, SMD (or SCD as it is called in v7) is only
1117         * supported if EL2 exists. The bit is UNK/SBZP when
1118         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1119         * when EL2 is unavailable.
1120         * On ARMv8, this bit is always available.
1121         */
1122        if (arm_feature(env, ARM_FEATURE_V7) &&
1123            !arm_feature(env, ARM_FEATURE_V8)) {
1124            valid_mask &= ~SCR_SMD;
1125        }
1126    }
1127
1128    /* Clear all-context RES0 bits.  */
1129    value &= valid_mask;
1130    raw_write(env, ri, value);
1131}
1132
1133static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1134{
1135    ARMCPU *cpu = arm_env_get_cpu(env);
1136
 1137    /* Acquire the CSSELR index from the CSSELR bank matching the security
 1138     * state of this CCSIDR access.
 1139     */
1140    uint32_t index = A32_BANKED_REG_GET(env, csselr,
1141                                        ri->secure & ARM_CP_SECSTATE_S);
1142
1143    return cpu->ccsidr[index];
1144}
1145
1146static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1147                         uint64_t value)
1148{
1149    raw_write(env, ri, value & 0xf);
1150}
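/* CSSELR and CCSIDR work as a pair: the (banked) CSSELR value written via
 * csselr_write() is limited to its low four bits and used by ccsidr_read()
 * as an index into the cpu->ccsidr[] table, so the guest selects a cache
 * level/type and then reads back the matching geometry word.
 */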
1151
1152static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1153{
1154    CPUState *cs = ENV_GET_CPU(env);
1155    uint64_t ret = 0;
1156
1157    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1158        ret |= CPSR_I;
1159    }
1160    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1161        ret |= CPSR_F;
1162    }
1163    /* External aborts are not possible in QEMU so A bit is always clear */
1164    return ret;
1165}
1166
1167static const ARMCPRegInfo v7_cp_reginfo[] = {
1168    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1169    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1170      .access = PL1_W, .type = ARM_CP_NOP },
1171    /* Performance monitors are implementation defined in v7,
1172     * but with an ARM recommended set of registers, which we
1173     * follow (although we don't actually implement any counters)
1174     *
1175     * Performance registers fall into three categories:
1176     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1177     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1178     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1179     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1180     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1181     */
1182    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
1183      .access = PL0_RW, .type = ARM_CP_ALIAS,
1184      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1185      .writefn = pmcntenset_write,
1186      .accessfn = pmreg_access,
1187      .raw_writefn = raw_write },
1188    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
1189      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
1190      .access = PL0_RW, .accessfn = pmreg_access,
1191      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
1192      .writefn = pmcntenset_write, .raw_writefn = raw_write },
1193    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
1194      .access = PL0_RW,
1195      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1196      .accessfn = pmreg_access,
1197      .writefn = pmcntenclr_write,
1198      .type = ARM_CP_ALIAS },
1199    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1200      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
1201      .access = PL0_RW, .accessfn = pmreg_access,
1202      .type = ARM_CP_ALIAS,
1203      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
1204      .writefn = pmcntenclr_write },
1205    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
1206      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1207      .accessfn = pmreg_access,
1208      .writefn = pmovsr_write,
1209      .raw_writefn = raw_write },
1210    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
1211      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
1212      .access = PL0_RW, .accessfn = pmreg_access,
1213      .type = ARM_CP_ALIAS,
1214      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
1215      .writefn = pmovsr_write,
1216      .raw_writefn = raw_write },
1217    /* Unimplemented so WI. */
1218    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
1219      .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
1220    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
1221     * We choose to RAZ/WI.
1222     */
1223    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
1224      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
1225      .accessfn = pmreg_access },
1226#ifndef CONFIG_USER_ONLY
1227    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
1228      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
1229      .readfn = pmccntr_read, .writefn = pmccntr_write32,
1230      .accessfn = pmreg_access },
1231    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
1232      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
1233      .access = PL0_RW, .accessfn = pmreg_access,
1234      .type = ARM_CP_IO,
1235      .readfn = pmccntr_read, .writefn = pmccntr_write, },
1236#endif
1237    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
1238      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
1239      .writefn = pmccfiltr_write,
1240      .access = PL0_RW, .accessfn = pmreg_access,
1241      .type = ARM_CP_IO,
1242      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
1243      .resetvalue = 0, },
1244    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
1245      .access = PL0_RW,
1246      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
1247      .accessfn = pmreg_access, .writefn = pmxevtyper_write,
1248      .raw_writefn = raw_write },
1249    /* Unimplemented, RAZ/WI. */
1250    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
1251      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
1252      .accessfn = pmreg_access },
1253    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
1254      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
1255      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
1256      .resetvalue = 0,
1257      .writefn = pmuserenr_write, .raw_writefn = raw_write },
1258    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
1259      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
1260      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
1261      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
1262      .resetvalue = 0,
1263      .writefn = pmuserenr_write, .raw_writefn = raw_write },
1264    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
1265      .access = PL1_RW, .accessfn = access_tpm,
1266      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1267      .resetvalue = 0,
1268      .writefn = pmintenset_write, .raw_writefn = raw_write },
1269    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
1270      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
1271      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1272      .writefn = pmintenclr_write, },
1273    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
1274      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
1275      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
1276      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
1277      .writefn = pmintenclr_write },
1278    { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
1279      .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
1280      .access = PL1_RW, .writefn = vbar_write,
1281      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
1282                             offsetof(CPUARMState, cp15.vbar_ns) },
1283      .resetvalue = 0 },
1284    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
1285      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
1286      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
1287    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
1288      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
1289      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
1290      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
1291                             offsetof(CPUARMState, cp15.csselr_ns) } },
1292    /* Auxiliary ID register: this actually has an IMPDEF value but for now
1293     * just RAZ for all cores:
1294     */
1295    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
1296      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
1297      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
1298    /* Auxiliary fault status registers: these also are IMPDEF, and we
1299     * choose to RAZ/WI for all cores.
1300     */
1301    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
1302      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
1303      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
1304    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
1305      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
1306      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
 1307    /* MAIR can just be read-as-written because we don't implement caches
1308     * and so don't need to care about memory attributes.
1309     */
1310    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
1311      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
1312      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
1313      .resetvalue = 0 },
1314    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
1315      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
1316      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
1317      .resetvalue = 0 },
1318    /* For non-long-descriptor page tables these are PRRR and NMRR;
1319     * regardless they still act as reads-as-written for QEMU.
1320     */
1321     /* MAIR0/1 are defined separately from their 64-bit counterpart which
1322      * allows them to assign the correct fieldoffset based on the endianness
1323      * handled in the field definitions.
1324      */
1325    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
1326      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
1327      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
1328                             offsetof(CPUARMState, cp15.mair0_ns) },
1329      .resetfn = arm_cp_reset_ignore },
1330    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
1331      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
1332      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
1333                             offsetof(CPUARMState, cp15.mair1_ns) },
1334      .resetfn = arm_cp_reset_ignore },
1335    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
1336      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
1337      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
1338    /* 32 bit ITLB invalidates */
1339    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
1340      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1341    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
1342      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1343    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
1344      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1345    /* 32 bit DTLB invalidates */
1346    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
1347      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1348    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
1349      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1350    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
1351      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1352    /* 32 bit TLB invalidates */
1353    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
1354      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
1355    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
1356      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
1357    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
1358      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
1359    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
1360      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
1361    REGINFO_SENTINEL
1362};
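/* Several entries above are marked ARM_CP_ALIAS (e.g. the 32-bit PMCNTENSET,
 * which shares cp15.c9_pmcnten with PMCNTENSET_EL0): they are alternative
 * views of state owned by another regdef, so the owning entry handles reset
 * and migration and the alias is not saved or restored twice.
 */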
1363
1364static const ARMCPRegInfo v7mp_cp_reginfo[] = {
1365    /* 32 bit TLB invalidates, Inner Shareable */
1366    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
1367      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
1368    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
1369      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
1370    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
1371      .type = ARM_CP_NO_RAW, .access = PL1_W,
1372      .writefn = tlbiasid_is_write },
1373    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
1374      .type = ARM_CP_NO_RAW, .access = PL1_W,
1375      .writefn = tlbimvaa_is_write },
1376    REGINFO_SENTINEL
1377};
1378
1379static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1380                        uint64_t value)
1381{
1382    value &= 1;
1383    env->teecr = value;
1384}
1385
1386static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1387                                    bool isread)
1388{
1389    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1390        return CP_ACCESS_TRAP;
1391    }
1392    return CP_ACCESS_OK;
1393}
1394
1395static const ARMCPRegInfo t2ee_cp_reginfo[] = {
1396    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
1397      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
1398      .resetvalue = 0,
1399      .writefn = teecr_write },
1400    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
1401      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
1402      .accessfn = teehbr_access, .resetvalue = 0 },
1403    REGINFO_SENTINEL
1404};
1405
1406static const ARMCPRegInfo v6k_cp_reginfo[] = {
1407    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
1408      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
1409      .access = PL0_RW,
1410      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
1411    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
1412      .access = PL0_RW,
1413      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
1414                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
1415      .resetfn = arm_cp_reset_ignore },
1416    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
1417      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
1418      .access = PL0_R|PL1_W,
1419      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
1420      .resetvalue = 0},
1421    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
1422      .access = PL0_R|PL1_W,
1423      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
1424                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
1425      .resetfn = arm_cp_reset_ignore },
1426    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
1427      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
1428      .access = PL1_RW,
1429      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
1430    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
1431      .access = PL1_RW,
1432      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
1433                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
1434      .resetvalue = 0 },
1435    REGINFO_SENTINEL
1436};
1437
1438#ifndef CONFIG_USER_ONLY
1439
1440static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
1441                                       bool isread)
1442{
1443    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1444     * Writable only at the highest implemented exception level.
1445     */
1446    int el = arm_current_el(env);
1447
1448    switch (el) {
1449    case 0:
1450        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
1451            return CP_ACCESS_TRAP;
1452        }
1453        break;
1454    case 1:
1455        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
1456            arm_is_secure_below_el3(env)) {
1457            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1458            return CP_ACCESS_TRAP_UNCATEGORIZED;
1459        }
1460        break;
1461    case 2:
1462    case 3:
1463        break;
1464    }
1465
1466    if (!isread && el < arm_highest_el(env)) {
1467        return CP_ACCESS_TRAP_UNCATEGORIZED;
1468    }
1469
1470    return CP_ACCESS_OK;
1471}
1472
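    /* PL0 visibility of the counters and timers is controlled by CNTKCTL.
     * With GTIMER_PHYS == 0 and GTIMER_VIRT == 1, gt_counter_access() tests
     * EL0PCTEN/EL0VCTEN (bits 0 and 1) directly via timeridx, while
     * gt_timer_access() tests EL0PTEN/EL0VTEN (bits 9 and 8) via the
     * "9 - timeridx" index.
     */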
1473static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
1474                                        bool isread)
1475{
1476    unsigned int cur_el = arm_current_el(env);
1477    bool secure = arm_is_secure(env);
1478
 1479    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
1480    if (cur_el == 0 &&
1481        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1482        return CP_ACCESS_TRAP;
1483    }
1484
1485    if (arm_feature(env, ARM_FEATURE_EL2) &&
1486        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1487        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
1488        return CP_ACCESS_TRAP_EL2;
1489    }
1490    return CP_ACCESS_OK;
1491}
1492
1493static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1494                                      bool isread)
1495{
1496    unsigned int cur_el = arm_current_el(env);
1497    bool secure = arm_is_secure(env);
1498
1499    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1500     * EL0[PV]TEN is zero.
1501     */
1502    if (cur_el == 0 &&
1503        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1504        return CP_ACCESS_TRAP;
1505    }
1506
1507    if (arm_feature(env, ARM_FEATURE_EL2) &&
1508        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1509        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
1510        return CP_ACCESS_TRAP_EL2;
1511    }
1512    return CP_ACCESS_OK;
1513}
1514
1515static CPAccessResult gt_pct_access(CPUARMState *env,
1516                                    const ARMCPRegInfo *ri,
1517                                    bool isread)
1518{
1519    return gt_counter_access(env, GTIMER_PHYS, isread);
1520}
1521
1522static CPAccessResult gt_vct_access(CPUARMState *env,
1523                                    const ARMCPRegInfo *ri,
1524                                    bool isread)
1525{
1526    return gt_counter_access(env, GTIMER_VIRT, isread);
1527}
1528
1529static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1530                                       bool isread)
1531{
1532    return gt_timer_access(env, GTIMER_PHYS, isread);
1533}
1534
1535static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1536                                       bool isread)
1537{
1538    return gt_timer_access(env, GTIMER_VIRT, isread);
1539}
1540
1541static CPAccessResult gt_stimer_access(CPUARMState *env,
1542                                       const ARMCPRegInfo *ri,
1543                                       bool isread)
1544{
1545    /* The AArch64 register view of the secure physical timer is
1546     * always accessible from EL3, and configurably accessible from
1547     * Secure EL1.
1548     */
1549    switch (arm_current_el(env)) {
1550    case 1:
1551        if (!arm_is_secure(env)) {
1552            return CP_ACCESS_TRAP;
1553        }
1554        if (!(env->cp15.scr_el3 & SCR_ST)) {
1555            return CP_ACCESS_TRAP_EL3;
1556        }
1557        return CP_ACCESS_OK;
1558    case 0:
1559    case 2:
1560        return CP_ACCESS_TRAP;
1561    case 3:
1562        return CP_ACCESS_OK;
1563    default:
1564        g_assert_not_reached();
1565    }
1566}
1567
1568static uint64_t gt_get_countervalue(CPUARMState *env)
1569{
1570    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1571}
1572
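    /* Recompute ISTATUS and the output interrupt line for one timer, and
     * (re)arm or delete the backing QEMUTimer so it fires at the next point
     * where ISTATUS can change. This is called both from the register write
     * handlers and from the per-timer callbacks further down.
     */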
1573static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1574{
1575    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1576
1577    if (gt->ctl & 1) {
1578        /* Timer enabled: calculate and set current ISTATUS, irq, and
1579         * reset timer to when ISTATUS next has to change
1580         */
1581        uint64_t offset = timeridx == GTIMER_VIRT ?
1582                                      cpu->env.cp15.cntvoff_el2 : 0;
1583        uint64_t count = gt_get_countervalue(&cpu->env);
1584        /* Note that this must be unsigned 64 bit arithmetic: */
1585        int istatus = count - offset >= gt->cval;
1586        uint64_t nexttick;
1587
1588        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1589        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
1590                     (istatus && !(gt->ctl & 2)));
1591        if (istatus) {
1592            /* Next transition is when count rolls back over to zero */
1593            nexttick = UINT64_MAX;
1594        } else {
1595            /* Next transition is when we hit cval */
1596            nexttick = gt->cval + offset;
1597        }
1598        /* Note that the desired next expiry time might be beyond the
1599         * signed-64-bit range of a QEMUTimer -- in this case we just
1600         * set the timer for as far in the future as possible. When the
1601         * timer expires we will reset the timer for any remaining period.
1602         */
1603        if (nexttick > INT64_MAX / GTIMER_SCALE) {
1604            nexttick = INT64_MAX / GTIMER_SCALE;
1605        }
1606        timer_mod(cpu->gt_timer[timeridx], nexttick);
1607    } else {
1608        /* Timer disabled: ISTATUS and timer output always clear */
1609        gt->ctl &= ~4;
1610        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
1611        timer_del(cpu->gt_timer[timeridx]);
1612    }
1613}
1614
1615static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1616                           int timeridx)
1617{
1618    ARMCPU *cpu = arm_env_get_cpu(env);
1619
1620    timer_del(cpu->gt_timer[timeridx]);
1621}
1622
1623static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1624{
1625    return gt_get_countervalue(env);
1626}
1627
1628static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1629{
1630    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
1631}
1632
1633static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1634                          int timeridx,
1635                          uint64_t value)
1636{
1637    env->cp15.c14_timer[timeridx].cval = value;
1638    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1639}
1640
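    /* The TVAL view is a signed 32-bit downcounter derived from CVAL:
     * reads return CVAL - (counter - offset), and writes rebase CVAL to
     * (counter - offset) + SignExtend32(value), so e.g. writing 100 arms
     * the timer 100 ticks from "now".
     */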
1641static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1642                             int timeridx)
1643{
1644    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1645
1646    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1647                      (gt_get_countervalue(env) - offset));
1648}
1649
1650static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1651                          int timeridx,
1652                          uint64_t value)
1653{
1654    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1655
1656    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1657                                         sextract64(value, 0, 32);
1658    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1659}
1660
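    /* CNT*_CTL layout: bit 0 is ENABLE, bit 1 is IMASK and bit 2 is the
     * read-only ISTATUS. Only the low two bits are taken from the guest
     * value below; ISTATUS is maintained by gt_recalc_timer().
     */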
1661static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1662                         int timeridx,
1663                         uint64_t value)
1664{
1665    ARMCPU *cpu = arm_env_get_cpu(env);
1666    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
1667
1668    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
1669    if ((oldval ^ value) & 1) {
1670        /* Enable toggled */
1671        gt_recalc_timer(cpu, timeridx);
1672    } else if ((oldval ^ value) & 2) {
1673        /* IMASK toggled: don't need to recalculate,
1674         * just set the interrupt line based on ISTATUS
1675         */
1676        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
1677                     (oldval & 4) && !(value & 2));
1678    }
1679}
1680
1681static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1682{
1683    gt_timer_reset(env, ri, GTIMER_PHYS);
1684}
1685
1686static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1687                               uint64_t value)
1688{
1689    gt_cval_write(env, ri, GTIMER_PHYS, value);
1690}
1691
1692static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1693{
1694    return gt_tval_read(env, ri, GTIMER_PHYS);
1695}
1696
1697static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1698                               uint64_t value)
1699{
1700    gt_tval_write(env, ri, GTIMER_PHYS, value);
1701}
1702
1703static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1704                              uint64_t value)
1705{
1706    gt_ctl_write(env, ri, GTIMER_PHYS, value);
1707}
1708
1709static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1710{
1711    gt_timer_reset(env, ri, GTIMER_VIRT);
1712}
1713
1714static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1715                               uint64_t value)
1716{
1717    gt_cval_write(env, ri, GTIMER_VIRT, value);
1718}
1719
1720static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1721{
1722    return gt_tval_read(env, ri, GTIMER_VIRT);
1723}
1724
1725static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1726                               uint64_t value)
1727{
1728    gt_tval_write(env, ri, GTIMER_VIRT, value);
1729}
1730
1731static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1732                              uint64_t value)
1733{
1734    gt_ctl_write(env, ri, GTIMER_VIRT, value);
1735}
1736
1737static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
1738                              uint64_t value)
1739{
1740    ARMCPU *cpu = arm_env_get_cpu(env);
1741
1742    raw_write(env, ri, value);
1743    gt_recalc_timer(cpu, GTIMER_VIRT);
1744}
1745
1746static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1747{
1748    gt_timer_reset(env, ri, GTIMER_HYP);
1749}
1750
1751static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1752                              uint64_t value)
1753{
1754    gt_cval_write(env, ri, GTIMER_HYP, value);
1755}
1756
1757static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1758{
1759    return gt_tval_read(env, ri, GTIMER_HYP);
1760}
1761
1762static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1763                              uint64_t value)
1764{
1765    gt_tval_write(env, ri, GTIMER_HYP, value);
1766}
1767
1768static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1769                              uint64_t value)
1770{
1771    gt_ctl_write(env, ri, GTIMER_HYP, value);
1772}
1773
1774static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1775{
1776    gt_timer_reset(env, ri, GTIMER_SEC);
1777}
1778
1779static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1780                              uint64_t value)
1781{
1782    gt_cval_write(env, ri, GTIMER_SEC, value);
1783}
1784
1785static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1786{
1787    return gt_tval_read(env, ri, GTIMER_SEC);
1788}
1789
1790static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1791                              uint64_t value)
1792{
1793    gt_tval_write(env, ri, GTIMER_SEC, value);
1794}
1795
1796static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1797                              uint64_t value)
1798{
1799    gt_ctl_write(env, ri, GTIMER_SEC, value);
1800}
1801
1802void arm_gt_ptimer_cb(void *opaque)
1803{
1804    ARMCPU *cpu = opaque;
1805
1806    gt_recalc_timer(cpu, GTIMER_PHYS);
1807}
1808
1809void arm_gt_vtimer_cb(void *opaque)
1810{
1811    ARMCPU *cpu = opaque;
1812
1813    gt_recalc_timer(cpu, GTIMER_VIRT);
1814}
1815
1816void arm_gt_htimer_cb(void *opaque)
1817{
1818    ARMCPU *cpu = opaque;
1819
1820    gt_recalc_timer(cpu, GTIMER_HYP);
1821}
1822
1823void arm_gt_stimer_cb(void *opaque)
1824{
1825    ARMCPU *cpu = opaque;
1826
1827    gt_recalc_timer(cpu, GTIMER_SEC);
1828}
1829
1830static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1831    /* Note that CNTFRQ is purely reads-as-written for the benefit
1832     * of software; writing it doesn't actually change the timer frequency.
1833     * Our reset value matches the fixed frequency we implement the timer at.
1834     */
1835    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
1836      .type = ARM_CP_ALIAS,
1837      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1838      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
1839    },
1840    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
1841      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
1842      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1843      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
1844      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
1845    },
1846    /* overall control: mostly access permissions */
1847    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
1848      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
1849      .access = PL1_RW,
1850      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
1851      .resetvalue = 0,
1852    },
1853    /* per-timer control */
1854    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1855      .secure = ARM_CP_SECSTATE_NS,
1856      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1857      .accessfn = gt_ptimer_access,
1858      .fieldoffset = offsetoflow32(CPUARMState,
1859                                   cp15.c14_timer[GTIMER_PHYS].ctl),
1860      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1861    },
1862    { .name = "CNTP_CTL(S)",
1863      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1864      .secure = ARM_CP_SECSTATE_S,
1865      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1866      .accessfn = gt_ptimer_access,
1867      .fieldoffset = offsetoflow32(CPUARMState,
1868                                   cp15.c14_timer[GTIMER_SEC].ctl),
1869      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
1870    },
1871    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
1872      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
1873      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1874      .accessfn = gt_ptimer_access,
1875      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
1876      .resetvalue = 0,
1877      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1878    },
1879    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
1880      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1881      .accessfn = gt_vtimer_access,
1882      .fieldoffset = offsetoflow32(CPUARMState,
1883                                   cp15.c14_timer[GTIMER_VIRT].ctl),
1884      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1885    },
1886    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
1887      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
1888      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1889      .accessfn = gt_vtimer_access,
1890      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
1891      .resetvalue = 0,
1892      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1893    },
1894    /* TimerValue views: a 32 bit downcounting view of the underlying state */
1895    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1896      .secure = ARM_CP_SECSTATE_NS,
1897      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1898      .accessfn = gt_ptimer_access,
1899      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1900    },
1901    { .name = "CNTP_TVAL(S)",
1902      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1903      .secure = ARM_CP_SECSTATE_S,
1904      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1905      .accessfn = gt_ptimer_access,
1906      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
1907    },
1908    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1909      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
1910      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1911      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
1912      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1913    },
1914    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
1915      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1916      .accessfn = gt_vtimer_access,
1917      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1918    },
1919    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1920      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
1921      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1922      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
1923      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1924    },
1925    /* The counter itself */
1926    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
1927      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1928      .accessfn = gt_pct_access,
1929      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
1930    },
1931    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
1932      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
1933      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1934      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
1935    },
1936    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
1937      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
1938      .accessfn = gt_vct_access,
1939      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
1940    },
1941    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
1942      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
1943      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
1944      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
1945    },
1946    /* Comparison value, indicating when the timer goes off */
1947    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
1948      .secure = ARM_CP_SECSTATE_NS,
1949      .access = PL1_RW | PL0_R,
1950      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1951      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1952      .accessfn = gt_ptimer_access,
1953      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1954    },
1955    { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
1956      .secure = ARM_CP_SECSTATE_S,
1957      .access = PL1_RW | PL0_R,
1958      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1959      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
1960      .accessfn = gt_ptimer_access,
1961      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
1962    },
1963    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1964      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
1965      .access = PL1_RW | PL0_R,
1966      .type = ARM_CP_IO,
1967      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
1968      .resetvalue = 0, .accessfn = gt_ptimer_access,
1969      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
1970    },
1971    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
1972      .access = PL1_RW | PL0_R,
1973      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
1974      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1975      .accessfn = gt_vtimer_access,
1976      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
1977    },
1978    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
1979      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
1980      .access = PL1_RW | PL0_R,
1981      .type = ARM_CP_IO,
1982      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
1983      .resetvalue = 0, .accessfn = gt_vtimer_access,
1984      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
1985    },
1986    /* Secure timer -- this is actually restricted to only EL3
1987     * and configurably Secure-EL1 via the accessfn.
1988     */
1989    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
1990      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
1991      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
1992      .accessfn = gt_stimer_access,
1993      .readfn = gt_sec_tval_read,
1994      .writefn = gt_sec_tval_write,
1995      .resetfn = gt_sec_timer_reset,
1996    },
1997    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
1998      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
1999      .type = ARM_CP_IO, .access = PL1_RW,
2000      .accessfn = gt_stimer_access,
2001      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2002      .resetvalue = 0,
2003      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2004    },
2005    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2006      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2007      .type = ARM_CP_IO, .access = PL1_RW,
2008      .accessfn = gt_stimer_access,
2009      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2010      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2011    },
2012    REGINFO_SENTINEL
2013};
2014
2015#else
2016/* In user-mode none of the generic timer registers are accessible,
2017 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
2018 * so instead just don't register any of them.
2019 */
2020static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2021    REGINFO_SENTINEL
2022};
2023
2024#endif
2025
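    /* Writes to the 32-bit PAR keep only the bits defined for this CPU:
     * the full value with LPAE, value & 0xfffff6ff on v7 without LPAE,
     * and value & 0xfffff1ff on earlier cores.
     */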
2026static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2027{
2028    if (arm_feature(env, ARM_FEATURE_LPAE)) {
2029        raw_write(env, ri, value);
2030    } else if (arm_feature(env, ARM_FEATURE_V7)) {
2031        raw_write(env, ri, value & 0xfffff6ff);
2032    } else {
2033        raw_write(env, ri, value & 0xfffff1ff);
2034    }
2035}
2036
2037#ifndef CONFIG_USER_ONLY
2038/* get_phys_addr() isn't present for user-mode-only targets */
2039
2040static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2041                                 bool isread)
2042{
2043    if (ri->opc2 & 4) {
2044        /* The ATS12NSO* operations must trap to EL3 if executed in
2045         * Secure EL1 (which can only happen if EL3 is AArch64).
2046         * They are simply UNDEF if executed from NS EL1.
2047         * They function normally from EL2 or EL3.
2048         */
2049        if (arm_current_el(env) == 1) {
2050            if (arm_is_secure_below_el3(env)) {
2051                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2052            }
2053            return CP_ACCESS_TRAP_UNCATEGORIZED;
2054        }
2055    }
2056    return CP_ACCESS_OK;
2057}
2058
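    /* Perform the translation for an ATS* operation and return the result
     * in PAR format: the 64-bit LPAE layout when extended addresses are
     * enabled, the 32-bit short layout otherwise. As an illustration of the
     * encoding computed below, a successful non-secure LPAE translation of
     * 0x80004000 to itself yields 0x80004a00: PA | LPAE (bit 11) | NS (bit 9).
     */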
2059static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
2060                             int access_type, ARMMMUIdx mmu_idx)
2061{
2062    hwaddr phys_addr;
2063    target_ulong page_size;
2064    int prot;
2065    uint32_t fsr;
2066    bool ret;
2067    uint64_t par64;
2068    MemTxAttrs attrs = {};
2069    ARMMMUFaultInfo fi = {};
2070
2071    ret = get_phys_addr(env, value, access_type, mmu_idx,
2072                        &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
2073    if (extended_addresses_enabled(env)) {
2074        /* fsr is a DFSR/IFSR value for the long descriptor
2075         * translation table format, but with WnR always clear.
2076         * Convert it to a 64-bit PAR.
2077         */
2078        par64 = (1 << 11); /* LPAE bit always set */
2079        if (!ret) {
2080            par64 |= phys_addr & ~0xfffULL;
2081            if (!attrs.secure) {
2082                par64 |= (1 << 9); /* NS */
2083            }
2084            /* We don't set the ATTR or SH fields in the PAR. */
2085        } else {
2086            par64 |= 1; /* F */
2087            par64 |= (fsr & 0x3f) << 1; /* FS */
2088            /* Note that S2WLK and FSTAGE are always zero, because we don't
2089             * implement virtualization and therefore there can't be a stage 2
2090             * fault.
2091             */
2092        }
2093    } else {
2094        /* fsr is a DFSR/IFSR value for the short descriptor
2095         * translation table format (with WnR always clear).
2096         * Convert it to a 32-bit PAR.
2097         */
2098        if (!ret) {
2099            /* We do not set any attribute bits in the PAR */
2100            if (page_size == (1 << 24)
2101                && arm_feature(env, ARM_FEATURE_V7)) {
2102                par64 = (phys_addr & 0xff000000) | (1 << 1);
2103            } else {
2104                par64 = phys_addr & 0xfffff000;
2105            }
2106            if (!attrs.secure) {
2107                par64 |= (1 << 9); /* NS */
2108            }
2109        } else {
2110            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
2111                    ((fsr & 0xf) << 1) | 1;
2112        }
2113    }
2114    return par64;
2115}
2116
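    /* The AArch32 ATS* operations encode the access type (read or write) in
     * opc2 bit 0 and the translation regime in opc2 bits [2:1], decoded by
     * the switch below; the result lands in the banked PAR via
     * A32_BANKED_CURRENT_REG_SET().
     */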
2117static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2118{
2119    int access_type = ri->opc2 & 1;
2120    uint64_t par64;
2121    ARMMMUIdx mmu_idx;
2122    int el = arm_current_el(env);
2123    bool secure = arm_is_secure_below_el3(env);
2124
2125    switch (ri->opc2 & 6) {
2126    case 0:
2127        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
2128        switch (el) {
2129        case 3:
2130            mmu_idx = ARMMMUIdx_S1E3;
2131            break;
2132        case 2:
2133            mmu_idx = ARMMMUIdx_S1NSE1;
2134            break;
2135        case 1:
2136            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2137            break;
2138        default:
2139            g_assert_not_reached();
2140        }
2141        break;
2142    case 2:
2143        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
2144        switch (el) {
2145        case 3:
2146            mmu_idx = ARMMMUIdx_S1SE0;
2147            break;
2148        case 2:
2149            mmu_idx = ARMMMUIdx_S1NSE0;
2150            break;
2151        case 1:
2152            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2153            break;
2154        default:
2155            g_assert_not_reached();
2156        }
2157        break;
2158    case 4:
2159        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
2160        mmu_idx = ARMMMUIdx_S12NSE1;
2161        break;
2162    case 6:
2163        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
2164        mmu_idx = ARMMMUIdx_S12NSE0;
2165        break;
2166    default:
2167        g_assert_not_reached();
2168    }
2169
2170    par64 = do_ats_write(env, value, access_type, mmu_idx);
2171
2172    A32_BANKED_CURRENT_REG_SET(env, par, par64);
2173}
2174
2175static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2176                        uint64_t value)
2177{
2178    int access_type = ri->opc2 & 1;
2179    uint64_t par64;
2180
2181    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
2182
2183    A32_BANKED_CURRENT_REG_SET(env, par, par64);
2184}
2185
2186static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2187                                     bool isread)
2188{
2189    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2190        return CP_ACCESS_TRAP;
2191    }
2192    return CP_ACCESS_OK;
2193}
2194
2195static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
2196                        uint64_t value)
2197{
2198    int access_type = ri->opc2 & 1;
2199    ARMMMUIdx mmu_idx;
2200    int secure = arm_is_secure_below_el3(env);
2201
2202    switch (ri->opc2 & 6) {
2203    case 0:
2204        switch (ri->opc1) {
2205        case 0: /* AT S1E1R, AT S1E1W */
2206            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2207            break;
2208        case 4: /* AT S1E2R, AT S1E2W */
2209            mmu_idx = ARMMMUIdx_S1E2;
2210            break;
2211        case 6: /* AT S1E3R, AT S1E3W */
2212            mmu_idx = ARMMMUIdx_S1E3;
2213            break;
2214        default:
2215            g_assert_not_reached();
2216        }
2217        break;
2218    case 2: /* AT S1E0R, AT S1E0W */
2219        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2220        break;
2221    case 4: /* AT S12E1R, AT S12E1W */
2222        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
2223        break;
2224    case 6: /* AT S12E0R, AT S12E0W */
2225        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
2226        break;
2227    default:
2228        g_assert_not_reached();
2229    }
2230
2231    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
2232}
2233#endif
2234
2235static const ARMCPRegInfo vapa_cp_reginfo[] = {
2236    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
2237      .access = PL1_RW, .resetvalue = 0,
2238      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
2239                             offsetoflow32(CPUARMState, cp15.par_ns) },
2240      .writefn = par_write },
2241#ifndef CONFIG_USER_ONLY
2242    /* This underdecoding is safe because the reginfo is NO_RAW. */
2243    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
2244      .access = PL1_W, .accessfn = ats_access,
2245      .writefn = ats_write, .type = ARM_CP_NO_RAW },
2246#endif
2247    REGINFO_SENTINEL
2248};
2249
2250/* Return basic MPU access permission bits.  */
2251static uint32_t simple_mpu_ap_bits(uint32_t val)
2252{
2253    uint32_t ret;
2254    uint32_t mask;
2255    int i;
2256    ret = 0;
2257    mask = 3;
2258    for (i = 0; i < 16; i += 2) {
2259        ret |= (val >> i) & mask;
2260        mask <<= 2;
2261    }
2262    return ret;
2263}
2264
2265/* Pad basic MPU access permission bits to extended format.  */
2266static uint32_t extended_mpu_ap_bits(uint32_t val)
2267{
2268    uint32_t ret;
2269    uint32_t mask;
2270    int i;
2271    ret = 0;
2272    mask = 3;
2273    for (i = 0; i < 16; i += 2) {
2274        ret |= (val & mask) << i;
2275        mask <<= 2;
2276    }
2277    return ret;
2278}
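
    /* As a worked example of the round trip between the two formats: a
     * "simple" value of 0x000e (region 0 AP = 2, region 1 AP = 3) expands
     * to extended 0x0032 and compresses back to 0x000e.
     */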
2279
2280static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2281                                 uint64_t value)
2282{
2283    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2284}
2285
2286static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2287{
2288    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2289}
2290
2291static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2292                                 uint64_t value)
2293{
2294    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2295}
2296
2297static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2298{
2299    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2300}
2301
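    /* The PMSAv7 DRBAR/DRSR/DRACR registers are backed by per-region arrays:
     * the reginfo fieldoffset points at a pointer (e.g. pmsav7.drbar), which
     * the helpers below index with the region currently selected by RGNR
     * (cp15.c6_rgnr), so a single cp15 encoding accesses whichever region is
     * selected.
     */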
2302static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2303{
2304    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2305
2306    if (!u32p) {
2307        return 0;
2308    }
2309
2310    u32p += env->cp15.c6_rgnr;
2311    return *u32p;
2312}
2313
2314static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2315                         uint64_t value)
2316{
2317    ARMCPU *cpu = arm_env_get_cpu(env);
2318    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2319
2320    if (!u32p) {
2321        return;
2322    }
2323
2324    u32p += env->cp15.c6_rgnr;
2325    tlb_flush(CPU(cpu), 1); /* Mappings may have changed - purge! */
2326    *u32p = value;
2327}
2328
2329static void pmsav7_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2330{
2331    ARMCPU *cpu = arm_env_get_cpu(env);
2332    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2333
2334    if (!u32p) {
2335        return;
2336    }
2337
2338    memset(u32p, 0, sizeof(*u32p) * cpu->pmsav7_dregion);
2339}
2340
2341static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2342                              uint64_t value)
2343{
2344    ARMCPU *cpu = arm_env_get_cpu(env);
2345    uint32_t nrgs = cpu->pmsav7_dregion;
2346
2347    if (value >= nrgs) {
2348        qemu_log_mask(LOG_GUEST_ERROR,
 2349                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
 2350                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
2351        return;
2352    }
2353
2354    raw_write(env, ri, value);
2355}
2356
2357static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
2358    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
2359      .access = PL1_RW, .type = ARM_CP_NO_RAW,
2360      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
2361      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2362    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
2363      .access = PL1_RW, .type = ARM_CP_NO_RAW,
2364      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
2365      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2366    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
2367      .access = PL1_RW, .type = ARM_CP_NO_RAW,
2368      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
2369      .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
2370    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
2371      .access = PL1_RW,
2372      .fieldoffset = offsetof(CPUARMState, cp15.c6_rgnr),
2373      .writefn = pmsav7_rgnr_write },
2374    REGINFO_SENTINEL
2375};
2376
2377static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
2378    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2379      .access = PL1_RW, .type = ARM_CP_ALIAS,
2380      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2381      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
2382    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2383      .access = PL1_RW, .type = ARM_CP_ALIAS,
2384      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2385      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
2386    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
2387      .access = PL1_RW,
2388      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2389      .resetvalue = 0, },
2390    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
2391      .access = PL1_RW,
2392      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2393      .resetvalue = 0, },
2394    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
2395      .access = PL1_RW,
2396      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
2397    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
2398      .access = PL1_RW,
2399      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
2400    /* Protection region base and size registers */
2401    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
2402      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2403      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
2404    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
2405      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2406      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
2407    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
2408      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2409      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
2410    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
2411      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2412      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
2413    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
2414      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2415      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
2416    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
2417      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2418      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
2419    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
2420      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2421      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
2422    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
2423      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2424      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
2425    REGINFO_SENTINEL
2426};
2427
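    /* Sanitise TTBCR/TCR writes for the CPU's architecture version and keep
     * the cached short-descriptor masks in sync. For example, with
     * TTBCR.N == 2 the code below computes mask == 0xc0000000 (VAs with
     * either of the top two bits set use TTBR1) and
     * base_mask == 0xfffff000 (the 4KB-aligned TTBR0 table base).
     */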
2428static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
2429                                 uint64_t value)
2430{
2431    TCR *tcr = raw_ptr(env, ri);
2432    int maskshift = extract32(value, 0, 3);
2433
2434    if (!arm_feature(env, ARM_FEATURE_V8)) {
2435        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
2436            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
 2437             * using Long-descriptor translation table format */
2438            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
2439        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
2440            /* In an implementation that includes the Security Extensions
2441             * TTBCR has additional fields PD0 [4] and PD1 [5] for
2442             * Short-descriptor translation table format.
2443             */
2444            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
2445        } else {
2446            value &= TTBCR_N;
2447        }
2448    }
2449
2450    /* Update the masks corresponding to the TCR bank being written
2451     * Note that we always calculate mask and base_mask, but
2452     * they are only used for short-descriptor tables (ie if EAE is 0);
2453     * for long-descriptor tables the TCR fields are used differently
2454     * and the mask and base_mask values are meaningless.
2455     */
2456    tcr->raw_tcr = value;
2457    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
2458    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
2459}
2460
2461static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2462                             uint64_t value)
2463{
2464    ARMCPU *cpu = arm_env_get_cpu(env);
2465
2466    if (arm_feature(env, ARM_FEATURE_LPAE)) {
2467        /* With LPAE the TTBCR could result in a change of ASID
2468         * via the TTBCR.A1 bit, so do a TLB flush.
2469         */
2470        tlb_flush(CPU(cpu), 1);
2471    }
2472    vmsa_ttbcr_raw_write(env, ri, value);
2473}
2474
2475static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2476{
2477    TCR *tcr = raw_ptr(env, ri);
2478
2479    /* Reset both the TCR as well as the masks corresponding to the bank of
2480     * the TCR being reset.
2481     */
2482    tcr->raw_tcr = 0;
2483    tcr->mask = 0;
2484    tcr->base_mask = 0xffffc000u;
2485}
2486
2487static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2488                               uint64_t value)
2489{
2490    ARMCPU *cpu = arm_env_get_cpu(env);
2491    TCR *tcr = raw_ptr(env, ri);
2492
2493    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2494    tlb_flush(CPU(cpu), 1);
2495    tcr->raw_tcr = value;
2496}
2497
2498static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2499                            uint64_t value)
2500{
2501    /* 64 bit accesses to the TTBRs can change the ASID and so we
2502     * must flush the TLB.
2503     */
2504    if (cpreg_field_is_64bit(ri)) {
2505        ARMCPU *cpu = arm_env_get_cpu(env);
2506
2507        tlb_flush(CPU(cpu), 1);
2508    }
2509    raw_write(env, ri, value);
2510}
2511
2512static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2513                        uint64_t value)
2514{
2515    ARMCPU *cpu = arm_env_get_cpu(env);
2516    CPUState *cs = CPU(cpu);
2517
2518    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
2519    if (raw_read(env, ri) != value) {
2520        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
2521                            ARMMMUIdx_S2NS, -1);
2522        raw_write(env, ri, value);
2523    }
2524}
2525
2526static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
2527    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2528      .access = PL1_RW, .type = ARM_CP_ALIAS,
2529      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
2530                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
2531    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2532      .access = PL1_RW, .resetvalue = 0,
2533      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
2534                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
2535    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
2536      .access = PL1_RW, .resetvalue = 0,
2537      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
2538                             offsetof(CPUARMState, cp15.dfar_ns) } },
2539    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
2540      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
2541      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
2542      .resetvalue = 0, },
2543    REGINFO_SENTINEL
2544};
2545
2546static const ARMCPRegInfo vmsa_cp_reginfo[] = {
2547    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
2548      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
2549      .access = PL1_RW,
2550      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
2551    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
2552      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
2553      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2554      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2555                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
2556    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
2557      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
2558      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2559      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2560                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
2561    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
2562      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2563      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
2564      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
2565      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
2566    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2567      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
2568      .raw_writefn = vmsa_ttbcr_raw_write,
2569      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
2570                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
2571    REGINFO_SENTINEL
2572};
2573
2574static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2575                                uint64_t value)
2576{
2577    env->cp15.c15_ticonfig = value & 0xe7;
2578    /* The OS_TYPE bit in this register changes the reported CPUID! */
2579    env->cp15.c0_cpuid = (value & (1 << 5)) ?
2580        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2581}
2582
2583static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2584                                uint64_t value)
2585{
2586    env->cp15.c15_threadid = value & 0xffff;
2587}
2588
2589static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2590                           uint64_t value)
2591{
2592    /* Wait-for-interrupt (deprecated) */
2593    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
2594}
2595
2596static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2597                                  uint64_t value)
2598{
2599    /* On OMAP there are registers indicating the max/min index of dcache lines
2600     * containing a dirty line; cache flush operations have to reset these.
2601     */
2602    env->cp15.c15_i_max = 0x000;
2603    env->cp15.c15_i_min = 0xff0;
2604}
2605
2606static const ARMCPRegInfo omap_cp_reginfo[] = {
2607    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
2608      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
2609      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
2610      .resetvalue = 0, },
2611    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
2612      .access = PL1_RW, .type = ARM_CP_NOP },
2613    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
2614      .access = PL1_RW,
2615      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
2616      .writefn = omap_ticonfig_write },
2617    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
2618      .access = PL1_RW,
2619      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
2620    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
2621      .access = PL1_RW, .resetvalue = 0xff0,
2622      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
2623    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
2624      .access = PL1_RW,
2625      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
2626      .writefn = omap_threadid_write },
2627    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
2628      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2629      .type = ARM_CP_NO_RAW,
2630      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
2631    /* TODO: Peripheral port remap register:
2632     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2633     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2634     * when MMU is off.
2635     */
2636    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
2637      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
2638      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
2639      .writefn = omap_cachemaint_write },
2640    { .name = "C9", .cp = 15, .crn = 9,
2641      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
2642      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
2643    REGINFO_SENTINEL
2644};
2645
2646static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2647                              uint64_t value)
2648{
2649    env->cp15.c15_cpar = value & 0x3fff;
2650}
2651
2652static const ARMCPRegInfo xscale_cp_reginfo[] = {
2653    { .name = "XSCALE_CPAR",
2654      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2655      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
2656      .writefn = xscale_cpar_write, },
2657    { .name = "XSCALE_AUXCR",
2658      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
2659      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
2660      .resetvalue = 0, },
2661    /* XScale specific cache-lockdown: since we have no cache we NOP these
2662     * and hope the guest does not really rely on cache behaviour.
2663     */
2664    { .name = "XSCALE_LOCK_ICACHE_LINE",
2665      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
2666      .access = PL1_W, .type = ARM_CP_NOP },
2667    { .name = "XSCALE_UNLOCK_ICACHE",
2668      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
2669      .access = PL1_W, .type = ARM_CP_NOP },
2670    { .name = "XSCALE_DCACHE_LOCK",
2671      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
2672      .access = PL1_RW, .type = ARM_CP_NOP },
2673    { .name = "XSCALE_UNLOCK_DCACHE",
2674      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
2675      .access = PL1_W, .type = ARM_CP_NOP },
2676    REGINFO_SENTINEL
2677};
2678
2679static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
2680    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
2681     * implementation of this implementation-defined space.
2682     * Ideally this should eventually disappear in favour of actually
2683     * implementing the correct behaviour for all cores.
2684     */
2685    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
2686      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2687      .access = PL1_RW,
2688      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
2689      .resetvalue = 0 },
2690    REGINFO_SENTINEL
2691};
2692
2693static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
2694    /* Cache status: RAZ because we have no cache so it's always clean */
2695    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
2696      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2697      .resetvalue = 0 },
2698    REGINFO_SENTINEL
2699};
2700
2701static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
 2702    /* We never have a block transfer operation in progress */
2703    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
2704      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2705      .resetvalue = 0 },
2706    /* The cache ops themselves: these all NOP for QEMU */
2707    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
2708      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2709    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
2710      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2711    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
2712      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2713    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
2714      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2715    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
2716      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2717    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
2718      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2719    REGINFO_SENTINEL
2720};
2721
2722static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
2723    /* The cache test-and-clean instructions always return (1 << 30)
2724     * to indicate that there are no dirty cache lines.
2725     */
2726    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
2727      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2728      .resetvalue = (1 << 30) },
2729    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
2730      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2731      .resetvalue = (1 << 30) },
2732    REGINFO_SENTINEL
2733};
2734
2735static const ARMCPRegInfo strongarm_cp_reginfo[] = {
2736    /* Ignore ReadBuffer accesses */
2737    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
2738      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2739      .access = PL1_RW, .resetvalue = 0,
2740      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
2741    REGINFO_SENTINEL
2742};
2743
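    /* When EL2 is implemented, MIDR reads from non-secure EL1 return the
     * virtualized ID in VPIDR_EL2; every other level sees the real register
     * value.
     */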
2744static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2745{
2746    ARMCPU *cpu = arm_env_get_cpu(env);
2747    unsigned int cur_el = arm_current_el(env);
2748    bool secure = arm_is_secure(env);
2749
2750    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2751        return env->cp15.vpidr_el2;
2752    }
2753    return raw_read(env, ri);
2754}
2755
2756uint64_t mpidr_read_val(CPUARMState *env)
2757{
2758    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
2759    uint64_t mpidr = cpu->mp_affinity;
2760    unsigned int cur_el = arm_current_el(env);
2761    bool secure = arm_is_secure(env);
2762
2763    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2764        return env->cp15.vmpidr_el2;
2765    }
2766
2767    if (arm_feature(env, ARM_FEATURE_V7MP)) {
2768        mpidr |= (1U << 31);
2769        /* Cores which are uniprocessor (non-coherent)
2770         * but still implement the MP extensions set
2771         * bit 30. (For instance, Cortex-R5).
2772         */
2773        if (cpu->mp_is_up) {
2774            mpidr |= (1u << 30);
2775        }
2776    }
2777    return mpidr;
2778}
2779
2780static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2781{
2782    unsigned int cur_el = arm_current_el(env);
2783    bool secure = arm_is_secure(env);
2784
2785    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2786        return env->cp15.vmpidr_el2;
2787    }
2788    return mpidr_read_val(env);
2789}
2790
2791static const ARMCPRegInfo mpidr_cp_reginfo[] = {
2792    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
2793      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
2794      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
2795    REGINFO_SENTINEL
2796};
2797
2798static const ARMCPRegInfo lpae_cp_reginfo[] = {
2799    /* NOP AMAIR0/1 */
2800    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
2801      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
2802      .access = PL1_RW, .type = ARM_CP_CONST,
2803      .resetvalue = 0 },
2804    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
2805    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
2806      .access = PL1_RW, .type = ARM_CP_CONST,
2807      .resetvalue = 0 },
2808    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
2809      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
2810      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
2811                             offsetof(CPUARMState, cp15.par_ns)} },
2812    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
2813      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2814      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2815                             offsetof(CPUARMState, cp15.ttbr0_ns) },
2816      .writefn = vmsa_ttbr_write, },
2817    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
2818      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2819      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2820                             offsetof(CPUARMState, cp15.ttbr1_ns) },
2821      .writefn = vmsa_ttbr_write, },
2822    REGINFO_SENTINEL
2823};
2824
2825static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2826{
2827    return vfp_get_fpcr(env);
2828}
2829
2830static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2831                            uint64_t value)
2832{
2833    vfp_set_fpcr(env, value);
2834}
2835
2836static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2837{
2838    return vfp_get_fpsr(env);
2839}
2840
2841static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2842                            uint64_t value)
2843{
2844    vfp_set_fpsr(env, value);
2845}
2846
2847static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
2848                                       bool isread)
2849{
2850    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
2851        return CP_ACCESS_TRAP;
2852    }
2853    return CP_ACCESS_OK;
2854}
2855
2856static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
2857                            uint64_t value)
2858{
2859    env->daif = value & PSTATE_DAIF;
2860}
2861
2862static CPAccessResult aa64_cacheop_access(CPUARMState *env,
2863                                          const ARMCPRegInfo *ri,
2864                                          bool isread)
2865{
2866    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
2867     * SCTLR_EL1.UCI is set.
2868     */
2869    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
2870        return CP_ACCESS_TRAP;
2871    }
2872    return CP_ACCESS_OK;
2873}
2874
2875/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
2876 * Page D4-1736 (DDI0487A.b)
2877 */
2878
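/* TLBI VMALLE1/ASIDE1: flush all stage 1 EL1&0 TLB entries for the
 * current security state, on this CPU only.
 */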
2879static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2880                                    uint64_t value)
2881{
2882    ARMCPU *cpu = arm_env_get_cpu(env);
2883    CPUState *cs = CPU(cpu);
2884
2885    if (arm_is_secure_below_el3(env)) {
2886        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2887    } else {
2888        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
2889    }
2890}
2891
2892static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2893                                      uint64_t value)
2894{
2895    bool sec = arm_is_secure_below_el3(env);
2896    CPUState *other_cs;
2897
2898    CPU_FOREACH(other_cs) {
2899        if (sec) {
2900            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2901        } else {
2902            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2903                                ARMMMUIdx_S12NSE0, -1);
2904        }
2905    }
2906}
2907
2908static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2909                                  uint64_t value)
2910{
2911    /* Note that the 'ALL' scope must invalidate both stage 1 and
2912     * stage 2 translations, whereas most other scopes only invalidate
2913     * stage 1 translations.
2914     */
2915    ARMCPU *cpu = arm_env_get_cpu(env);
2916    CPUState *cs = CPU(cpu);
2917
2918    if (arm_is_secure_below_el3(env)) {
2919        tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2920    } else {
2921        if (arm_feature(env, ARM_FEATURE_EL2)) {
2922            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
2923                                ARMMMUIdx_S2NS, -1);
2924        } else {
2925            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
2926        }
2927    }
2928}
2929
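/* TLBI ALLE2: flush all stage 1 EL2 TLB entries, on this CPU only. */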
2930static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
2931                                  uint64_t value)
2932{
2933    ARMCPU *cpu = arm_env_get_cpu(env);
2934    CPUState *cs = CPU(cpu);
2935
2936    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
2937}
2938
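/* TLBI ALLE3: flush all stage 1 EL3 TLB entries, on this CPU only. */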
2939static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
2940                                  uint64_t value)
2941{
2942    ARMCPU *cpu = arm_env_get_cpu(env);
2943    CPUState *cs = CPU(cpu);
2944
2945    tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1);
2946}
2947
2948static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2949                                    uint64_t value)
2950{
2951    /* Note that the 'ALL' scope must invalidate both stage 1 and
2952     * stage 2 translations, whereas most other scopes only invalidate
2953     * stage 1 translations.
2954     */
2955    bool sec = arm_is_secure_below_el3(env);
2956    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
2957    CPUState *other_cs;
2958
2959    CPU_FOREACH(other_cs) {
2960        if (sec) {
2961            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
2962        } else if (has_el2) {
2963            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2964                                ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
2965        } else {
2966            tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
2967                                ARMMMUIdx_S12NSE0, -1);
2968        }
2969    }
2970}
2971
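/* TLBI ALLE2IS: flush all stage 1 EL2 TLB entries on every CPU. */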
2972static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2973                                    uint64_t value)
2974{
2975    CPUState *other_cs;
2976
2977    CPU_FOREACH(other_cs) {
2978        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
2979    }
2980}
2981
2982static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2983                                    uint64_t value)
2984{
2985    CPUState *other_cs;
2986
2987    CPU_FOREACH(other_cs) {
2988        tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1);
2989    }
2990}
2991
2992static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2993                                 uint64_t value)
2994{
2995    /* Invalidate by VA, EL1&0 (AArch64 version).
2996     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
2997     * since we don't support flush-for-specific-ASID-only or
2998     * flush-last-level-only.
2999     */
3000    ARMCPU *cpu = arm_env_get_cpu(env);
3001    CPUState *cs = CPU(cpu);
3002    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3003
3004    if (arm_is_secure_below_el3(env)) {
3005        tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1,
3006                                 ARMMMUIdx_S1SE0, -1);
3007    } else {
3008        tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1,
3009                                 ARMMMUIdx_S12NSE0, -1);
3010    }
3011}
3012
3013static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3014                                 uint64_t value)
3015{
3016    /* Invalidate by VA, EL2
3017     * Currently handles both VAE2 and VALE2, since we don't support
3018     * flush-last-level-only.
3019     */
3020    ARMCPU *cpu = arm_env_get_cpu(env);
3021    CPUState *cs = CPU(cpu);
3022    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3023
3024    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
3025}
3026
3027static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3028                                 uint64_t value)
3029{
3030    /* Invalidate by VA, EL3
3031     * Currently handles both VAE3 and VALE3, since we don't support
3032     * flush-last-level-only.
3033     */
3034    ARMCPU *cpu = arm_env_get_cpu(env);
3035    CPUState *cs = CPU(cpu);
3036    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3037
3038    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E3, -1);
3039}
3040
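/* Inner Shareable version of the by-VA EL1&0 invalidates above:
 * broadcast the flush to every CPU.
 */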
3041static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3042                                   uint64_t value)
3043{
3044    bool sec = arm_is_secure_below_el3(env);
3045    CPUState *other_cs;
3046    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3047
3048    CPU_FOREACH(other_cs) {
3049        if (sec) {
3050            tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1SE1,
3051                                     ARMMMUIdx_S1SE0, -1);
3052        } else {
3053            tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S12NSE1,
3054                                     ARMMMUIdx_S12NSE0, -1);
3055        }
3056    }
3057}
3058
3059static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3060                                   uint64_t value)
3061{
3062    CPUState *other_cs;
3063    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3064
3065    CPU_FOREACH(other_cs) {
3066        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
3067    }
3068}
3069
3070static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3071                                   uint64_t value)
3072{
3073    CPUState *other_cs;
3074    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3075
3076    CPU_FOREACH(other_cs) {
3077        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E3, -1);
3078    }
3079}
3080
3081static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3082                                    uint64_t value)
3083{
3084    /* Invalidate by IPA. This has to invalidate any structures that
3085     * contain only stage 2 translation information, but does not need
3086     * to apply to structures that contain combined stage 1 and stage 2
3087     * translation information.
3088     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
3089     */
3090    ARMCPU *cpu = arm_env_get_cpu(env);
3091    CPUState *cs = CPU(cpu);
3092    uint64_t pageaddr;
3093
3094    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3095        return;
3096    }
3097
3098    pageaddr = sextract64(value << 12, 0, 48);
3099
3100    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
3101}
3102
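/* Inner Shareable version of TLBI IPAS2E1: broadcast to every CPU.
 * Like the non-IS form, this is a NOP if EL2 isn't implemented or
 * SCR_EL3.NS is zero.
 */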
3103static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3104                                      uint64_t value)
3105{
3106    CPUState *other_cs;
3107    uint64_t pageaddr;
3108
3109    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3110        return;
3111    }
3112
3113    pageaddr = sextract64(value << 12, 0, 48);
3114
3115    CPU_FOREACH(other_cs) {
3116        tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
3117    }
3118}
3119
3120static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3121                                      bool isread)
3122{
3123    /* We don't implement EL2, so the only control on DC ZVA is the
3124     * bit in the SCTLR which can prohibit access for EL0.
3125     */
3126    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3127        return CP_ACCESS_TRAP;
3128    }
3129    return CP_ACCESS_OK;
3130}
3131
3132static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3133{
3134    ARMCPU *cpu = arm_env_get_cpu(env);
3135    int dzp_bit = 1 << 4;
3136
3137    /* DZP indicates whether DC ZVA access is allowed */
3138    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3139        dzp_bit = 0;
3140    }
3141    return cpu->dcz_blocksize | dzp_bit;
3142}
3143
3144static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3145                                    bool isread)
3146{
3147    if (!(env->pstate & PSTATE_SP)) {
3148        /* Access to SP_EL0 is undefined if it's being used as
3149         * the stack pointer.
3150         */
3151        return CP_ACCESS_TRAP_UNCATEGORIZED;
3152    }
3153    return CP_ACCESS_OK;
3154}
3155
3156static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
3157{
3158    return env->pstate & PSTATE_SP;
3159}
3160
3161static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
3162{
3163    update_spsel(env, val);
3164}
3165
3166static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3167                        uint64_t value)
3168{
3169    ARMCPU *cpu = arm_env_get_cpu(env);
3170
3171    if (raw_read(env, ri) == value) {
3172        /* Skip the TLB flush if nothing actually changed; Linux likes
3173         * to do a lot of pointless SCTLR writes.
3174         */
3175        return;
3176    }
3177
3178    raw_write(env, ri, value);
3179    /* ??? Lots of these bits are not implemented.  */
3180    /* This may enable/disable the MMU, so do a TLB flush.  */
3181    tlb_flush(CPU(cpu), 1);
3182}
3183
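/* Accesses to FPEXC32_EL2 trap to EL2 if CPTR_EL2.TFP is set (when
 * executing at EL2) and to EL3 if CPTR_EL3.TFP is set.
 */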
3184static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
3185                                     bool isread)
3186{
3187    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
3188        return CP_ACCESS_TRAP_FP_EL2;
3189    }
3190    if (env->cp15.cptr_el[3] & CPTR_TFP) {
3191        return CP_ACCESS_TRAP_FP_EL3;
3192    }
3193    return CP_ACCESS_OK;
3194}
3195
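/* SDCR writes update only the architecturally valid bits of MDCR_EL3. */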
3196static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3197                       uint64_t value)
3198{
3199    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
3200}
3201
3202static const ARMCPRegInfo v8_cp_reginfo[] = {
3203    /* Minimal set of EL0-visible registers. This will need to be expanded
3204     * significantly for system emulation of AArch64 CPUs.
3205     */
3206    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
3207      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
3208      .access = PL0_RW, .type = ARM_CP_NZCV },
3209    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
3210      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
3211      .type = ARM_CP_NO_RAW,
3212      .access = PL0_RW, .accessfn = aa64_daif_access,
3213      .fieldoffset = offsetof(CPUARMState, daif),
3214      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
3215    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
3216      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
3217      .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
3218    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
3219      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
3220      .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
3221    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
3222      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
3223      .access = PL0_R, .type = ARM_CP_NO_RAW,
3224      .readfn = aa64_dczid_read },
3225    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
3226      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
3227      .access = PL0_W, .type = ARM_CP_DC_ZVA,
3228#ifndef CONFIG_USER_ONLY
3229      /* Avoid overhead of an access check that always passes in user-mode */
3230      .accessfn = aa64_zva_access,
3231#endif
3232    },
3233    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
3234      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
3235      .access = PL1_R, .type = ARM_CP_CURRENTEL },
3236    /* Cache ops: all NOPs since we don't emulate caches */
3237    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
3238      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3239      .access = PL1_W, .type = ARM_CP_NOP },
3240    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
3241      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3242      .access = PL1_W, .type = ARM_CP_NOP },
3243    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
3244      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
3245      .access = PL0_W, .type = ARM_CP_NOP,
3246      .accessfn = aa64_cacheop_access },
3247    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
3248      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3249      .access = PL1_W, .type = ARM_CP_NOP },
3250    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
3251      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3252      .access = PL1_W, .type = ARM_CP_NOP },
3253    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
3254      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
3255      .access = PL0_W, .type = ARM_CP_NOP,
3256      .accessfn = aa64_cacheop_access },
3257    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
3258      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3259      .access = PL1_W, .type = ARM_CP_NOP },
3260    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
3261      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
3262      .access = PL0_W, .type = ARM_CP_NOP,
3263      .accessfn = aa64_cacheop_access },
3264    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
3265      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
3266      .access = PL0_W, .type = ARM_CP_NOP,
3267      .accessfn = aa64_cacheop_access },
3268    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
3269      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3270      .access = PL1_W, .type = ARM_CP_NOP },
3271    /* TLBI operations */
3272    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
3273      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
3274      .access = PL1_W, .type = ARM_CP_NO_RAW,
3275      .writefn = tlbi_aa64_vmalle1is_write },
3276    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
3277      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
3278      .access = PL1_W, .type = ARM_CP_NO_RAW,
3279      .writefn = tlbi_aa64_vae1is_write },
3280    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
3281      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
3282      .access = PL1_W, .type = ARM_CP_NO_RAW,
3283      .writefn = tlbi_aa64_vmalle1is_write },
3284    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
3285      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
3286      .access = PL1_W, .type = ARM_CP_NO_RAW,
3287      .writefn = tlbi_aa64_vae1is_write },
3288    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
3289      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3290      .access = PL1_W, .type = ARM_CP_NO_RAW,
3291      .writefn = tlbi_aa64_vae1is_write },
3292    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
3293      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3294      .access = PL1_W, .type = ARM_CP_NO_RAW,
3295      .writefn = tlbi_aa64_vae1is_write },
3296    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
3297      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
3298      .access = PL1_W, .type = ARM_CP_NO_RAW,
3299      .writefn = tlbi_aa64_vmalle1_write },
3300    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
3301      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
3302      .access = PL1_W, .type = ARM_CP_NO_RAW,
3303      .writefn = tlbi_aa64_vae1_write },
3304    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
3305      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
3306      .access = PL1_W, .type = ARM_CP_NO_RAW,
3307      .writefn = tlbi_aa64_vmalle1_write },
3308    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
3309      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
3310      .access = PL1_W, .type = ARM_CP_NO_RAW,
3311      .writefn = tlbi_aa64_vae1_write },
3312    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
3313      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3314      .access = PL1_W, .type = ARM_CP_NO_RAW,
3315      .writefn = tlbi_aa64_vae1_write },
3316    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
3317      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3318      .access = PL1_W, .type = ARM_CP_NO_RAW,
3319      .writefn = tlbi_aa64_vae1_write },
3320    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
3321      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3322      .access = PL2_W, .type = ARM_CP_NO_RAW,
3323      .writefn = tlbi_aa64_ipas2e1is_write },
3324    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
3325      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3326      .access = PL2_W, .type = ARM_CP_NO_RAW,
3327      .writefn = tlbi_aa64_ipas2e1is_write },
3328    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
3329      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3330      .access = PL2_W, .type = ARM_CP_NO_RAW,
3331      .writefn = tlbi_aa64_alle1is_write },
3332    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
3333      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
3334      .access = PL2_W, .type = ARM_CP_NO_RAW,
3335      .writefn = tlbi_aa64_alle1is_write },
3336    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
3337      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3338      .access = PL2_W, .type = ARM_CP_NO_RAW,
3339      .writefn = tlbi_aa64_ipas2e1_write },
3340    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
3341      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3342      .access = PL2_W, .type = ARM_CP_NO_RAW,
3343      .writefn = tlbi_aa64_ipas2e1_write },
3344    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
3345      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3346      .access = PL2_W, .type = ARM_CP_NO_RAW,
3347      .writefn = tlbi_aa64_alle1_write },
3348    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
3349      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
3350      .access = PL2_W, .type = ARM_CP_NO_RAW,
3351      .writefn = tlbi_aa64_alle1_write },
3352#ifndef CONFIG_USER_ONLY
3353    /* 64 bit address translation operations */
3354    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
3355      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
3356      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3357    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
3358      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
3359      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3360    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
3361      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
3362      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3363    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
3364      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
3365      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3366    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
3367      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
3368      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3369    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
3370      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
3371      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3372    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
3373      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
3374      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3375    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
3376      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
3377      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3378    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
3379    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
3380      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
3381      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3382    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
3383      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
3384      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3385    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
3386      .type = ARM_CP_ALIAS,
3387      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
3388      .access = PL1_RW, .resetvalue = 0,
3389      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
3390      .writefn = par_write },
3391#endif
3392    /* TLB invalidate last level of translation table walk */
3393    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3394      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
3395    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3396      .type = ARM_CP_NO_RAW, .access = PL1_W,
3397      .writefn = tlbimvaa_is_write },
3398    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3399      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
3400    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3401      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
3402    /* 32 bit cache operations */
3403    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3404      .type = ARM_CP_NOP, .access = PL1_W },
3405    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
3406      .type = ARM_CP_NOP, .access = PL1_W },
3407    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3408      .type = ARM_CP_NOP, .access = PL1_W },
3409    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
3410      .type = ARM_CP_NOP, .access = PL1_W },
3411    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
3412      .type = ARM_CP_NOP, .access = PL1_W },
3413    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
3414      .type = ARM_CP_NOP, .access = PL1_W },
3415    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3416      .type = ARM_CP_NOP, .access = PL1_W },
3417    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3418      .type = ARM_CP_NOP, .access = PL1_W },
3419    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
3420      .type = ARM_CP_NOP, .access = PL1_W },
3421    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3422      .type = ARM_CP_NOP, .access = PL1_W },
3423    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
3424      .type = ARM_CP_NOP, .access = PL1_W },
3425    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
3426      .type = ARM_CP_NOP, .access = PL1_W },
3427    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3428      .type = ARM_CP_NOP, .access = PL1_W },
3429    /* MMU Domain access control / MPU write buffer control */
3430    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
3431      .access = PL1_RW, .resetvalue = 0,
3432      .writefn = dacr_write, .raw_writefn = raw_write,
3433      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
3434                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
3435    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
3436      .type = ARM_CP_ALIAS,
3437      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
3438      .access = PL1_RW,
3439      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
3440    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
3441      .type = ARM_CP_ALIAS,
3442      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
3443      .access = PL1_RW,
3444      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
3445    /* We rely on the access checks not allowing the guest to write to the
3446     * state field when SPSel indicates that it's being used as the stack
3447     * pointer.
3448     */
3449    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
3450      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
3451      .access = PL1_RW, .accessfn = sp_el0_access,
3452      .type = ARM_CP_ALIAS,
3453      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
3454    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
3455      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
3456      .access = PL2_RW, .type = ARM_CP_ALIAS,
3457      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
3458    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
3459      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
3460      .type = ARM_CP_NO_RAW,
3461      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
3462    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
3463      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
3464      .type = ARM_CP_ALIAS,
3465      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
3466      .access = PL2_RW, .accessfn = fpexc32_access },
3467    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
3468      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
3469      .access = PL2_RW, .resetvalue = 0,
3470      .writefn = dacr_write, .raw_writefn = raw_write,
3471      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
3472    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
3473      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
3474      .access = PL2_RW, .resetvalue = 0,
3475      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
3476    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
3477      .type = ARM_CP_ALIAS,
3478      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
3479      .access = PL2_RW,
3480      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
3481    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
3482      .type = ARM_CP_ALIAS,
3483      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
3484      .access = PL2_RW,
3485      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
3486    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
3487      .type = ARM_CP_ALIAS,
3488      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
3489      .access = PL2_RW,
3490      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
3491    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
3492      .type = ARM_CP_ALIAS,
3493      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
3494      .access = PL2_RW,
3495      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
3496    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
3497      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
3498      .resetvalue = 0,
3499      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
3500    { .name = "SDCR", .type = ARM_CP_ALIAS,
3501      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
3502      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3503      .writefn = sdcr_write,
3504      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
3505    REGINFO_SENTINEL
3506};
3507
3508/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
3509static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
3510    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3511      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3512      .access = PL2_RW,
3513      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3514    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3515      .type = ARM_CP_NO_RAW,
3516      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3517      .access = PL2_RW,
3518      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3519    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3520      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3521      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3522    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3523      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3524      .access = PL2_RW, .type = ARM_CP_CONST,
3525      .resetvalue = 0 },
3526    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3527      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3528      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3529    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3530      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3531      .access = PL2_RW, .type = ARM_CP_CONST,
3532      .resetvalue = 0 },
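    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */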
3533    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3534      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3535      .access = PL2_RW, .type = ARM_CP_CONST,
3536      .resetvalue = 0 },
3537    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3538      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3539      .access = PL2_RW, .type = ARM_CP_CONST,
3540      .resetvalue = 0 },
3541    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3542      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3543      .access = PL2_RW, .type = ARM_CP_CONST,
3544      .resetvalue = 0 },
3545    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3546      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3547      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3548    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
3549      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3550      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3551      .type = ARM_CP_CONST, .resetvalue = 0 },
3552    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3553      .cp = 15, .opc1 = 6, .crm = 2,
3554      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3555      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
3556    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3557      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3558      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3559    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3560      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3561      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3562    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3563      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3564      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3565    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3566      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3567      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3568    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3569      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3570      .resetvalue = 0 },
3571    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3572      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3573      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3574    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3575      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3576      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3577    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3578      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3579      .resetvalue = 0 },
3580    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3581      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3582      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3583    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3584      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3585      .resetvalue = 0 },
3586    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3587      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3588      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3589    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3590      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3591      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3592    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3593      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3594      .access = PL2_RW, .accessfn = access_tda,
3595      .type = ARM_CP_CONST, .resetvalue = 0 },
3596    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
3597      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3598      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3599      .type = ARM_CP_CONST, .resetvalue = 0 },
3600    { .name = "HSTR_EL2", .state = ARM_CP_STATE_AA64,
3601      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3602      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3603    REGINFO_SENTINEL
3604};
3605
3606static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3607{
3608    ARMCPU *cpu = arm_env_get_cpu(env);
3609    uint64_t valid_mask = HCR_MASK;
3610
3611    if (arm_feature(env, ARM_FEATURE_EL3)) {
3612        valid_mask &= ~HCR_HCD;
3613    } else {
3614        valid_mask &= ~HCR_TSC;
3615    }
3616
3617    /* Clear RES0 bits.  */
3618    value &= valid_mask;
3619
3620    /* These bits change the MMU setup:
3621     * HCR_VM enables stage 2 translation
3622     * HCR_PTW forbids certain page-table setups
3623     * HCR_DC Disables stage1 and enables stage2 translation
3624     */
3625    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
3626        tlb_flush(CPU(cpu), 1);
3627    }
3628    raw_write(env, ri, value);
3629}
3630
3631static const ARMCPRegInfo el2_cp_reginfo[] = {
3632    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3633      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3634      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
3635      .writefn = hcr_write },
3636    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
3637      .type = ARM_CP_ALIAS,
3638      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
3639      .access = PL2_RW,
3640      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
3641    { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
3642      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
3643      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
3644    { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
3645      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
3646      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
3647    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
3648      .type = ARM_CP_ALIAS,
3649      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
3650      .access = PL2_RW,
3651      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
3652    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3653      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3654      .access = PL2_RW, .writefn = vbar_write,
3655      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
3656      .resetvalue = 0 },
3657    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
3658      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
3659      .access = PL3_RW, .type = ARM_CP_ALIAS,
3660      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
3661    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3662      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3663      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
3664      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
3665    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3666      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3667      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
3668      .resetvalue = 0 },
3669    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3670      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3671      .access = PL2_RW, .type = ARM_CP_ALIAS,
3672      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
3673    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3674      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3675      .access = PL2_RW, .type = ARM_CP_CONST,
3676      .resetvalue = 0 },
3677    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
3678    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3679      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3680      .access = PL2_RW, .type = ARM_CP_CONST,
3681      .resetvalue = 0 },
3682    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3683      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3684      .access = PL2_RW, .type = ARM_CP_CONST,
3685      .resetvalue = 0 },
3686    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3687      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3688      .access = PL2_RW, .type = ARM_CP_CONST,
3689      .resetvalue = 0 },
3690    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3691      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3692      .access = PL2_RW, .writefn = vmsa_tcr_el1_write,
3693      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
3694      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
3695    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
3696      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3697      .type = ARM_CP_ALIAS,
3698      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3699      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3700    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
3701      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3702      .access = PL2_RW,
3703      /* no .writefn needed as this can't cause an ASID change;
3704       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
3705       */
3706      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3707    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3708      .cp = 15, .opc1 = 6, .crm = 2,
3709      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3710      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3711      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
3712      .writefn = vttbr_write },
3713    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3714      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3715      .access = PL2_RW, .writefn = vttbr_write,
3716      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
3717    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3718      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3719      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
3720      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
3721    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3722      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3723      .access = PL2_RW, .resetvalue = 0,
3724      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
3725    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3726      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3727      .access = PL2_RW, .resetvalue = 0,
3728      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3729    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3730      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3731      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3732    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
3733      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
3734      .type = ARM_CP_NO_RAW, .access = PL2_W,
3735      .writefn = tlbi_aa64_alle2_write },
3736    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
3737      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
3738      .type = ARM_CP_NO_RAW, .access = PL2_W,
3739      .writefn = tlbi_aa64_vae2_write },
3740    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
3741      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3742      .access = PL2_W, .type = ARM_CP_NO_RAW,
3743      .writefn = tlbi_aa64_vae2_write },
3744    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
3745      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
3746      .access = PL2_W, .type = ARM_CP_NO_RAW,
3747      .writefn = tlbi_aa64_alle2is_write },
3748    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
3749      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
3750      .type = ARM_CP_NO_RAW, .access = PL2_W,
3751      .writefn = tlbi_aa64_vae2is_write },
3752    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
3753      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3754      .access = PL2_W, .type = ARM_CP_NO_RAW,
3755      .writefn = tlbi_aa64_vae2is_write },
3756#ifndef CONFIG_USER_ONLY
3757    /* Unlike the other EL2-related AT operations, these must
3758     * UNDEF from EL3 if EL2 is not implemented, which is why we
3759     * define them here rather than with the rest of the AT ops.
3760     */
3761    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
3762      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3763      .access = PL2_W, .accessfn = at_s1e2_access,
3764      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3765    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
3766      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3767      .access = PL2_W, .accessfn = at_s1e2_access,
3768      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3769    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
3770     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
3771     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
3772     * to behave as if SCR.NS was 1.
3773     */
3774    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3775      .access = PL2_W,
3776      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3777    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3778      .access = PL2_W,
3779      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3780    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3781      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3782      /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
3783       * reset values as IMPDEF. We choose to reset to 3 to comply with
3784       * both ARMv7 and ARMv8.
3785       */
3786      .access = PL2_RW, .resetvalue = 3,
3787      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
3788    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3789      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3790      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
3791      .writefn = gt_cntvoff_write,
3792      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3793    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3794      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
3795      .writefn = gt_cntvoff_write,
3796      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3797    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3798      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3799      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3800      .type = ARM_CP_IO, .access = PL2_RW,
3801      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
3802    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3803      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3804      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
3805      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
3806    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3807      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3808      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
3809      .resetfn = gt_hyp_timer_reset,
3810      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
3811    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3812      .type = ARM_CP_IO,
3813      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3814      .access = PL2_RW,
3815      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
3816      .resetvalue = 0,
3817      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
3818#endif
3819    /* The only field of MDCR_EL2 that has a defined architectural reset value
3820     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
3821     * don't implement any PMU event counters, so using zero as a reset
3822     * value for MDCR_EL2 is okay.
3823     */
3824    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3825      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3826      .access = PL2_RW, .resetvalue = 0,
3827      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
3828    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
3829      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3830      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3831      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
3832    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
3833      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3834      .access = PL2_RW,
3835      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
3836    { .name = "HSTR_EL2", .state = ARM_CP_STATE_AA64,
3837      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3838      .access = PL2_RW,
3839      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore, },
3840    REGINFO_SENTINEL
3841};
3842
3843static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
3844                                   bool isread)
3845{
3846    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
3847     * At Secure EL1 it traps to EL3.
3848     */
3849    if (arm_current_el(env) == 3) {
3850        return CP_ACCESS_OK;
3851    }
3852    if (arm_is_secure_below_el3(env)) {
3853        return CP_ACCESS_TRAP_EL3;
3854    }
3855    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
3856    if (isread) {
3857        return CP_ACCESS_OK;
3858    }
3859    return CP_ACCESS_TRAP_UNCATEGORIZED;
3860}
3861
3862static const ARMCPRegInfo el3_cp_reginfo[] = {
3863    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
3864      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
3865      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
3866      .resetvalue = 0, .writefn = scr_write },
3867    { .name = "SCR",  .type = ARM_CP_ALIAS,
3868      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
3869      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3870      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
3871      .writefn = scr_write },
3872    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
3873      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
3874      .access = PL3_RW, .resetvalue = 0,
3875      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
3876    { .name = "SDER",
3877      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
3878      .access = PL3_RW, .resetvalue = 0,
3879      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
3880    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
3881      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3882      .writefn = vbar_write, .resetvalue = 0,
3883      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
3884    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
3885      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
3886      .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3887      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
3888    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
3889      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
3890      .access = PL3_RW, .writefn = vmsa_tcr_el1_write,
3891      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
3892      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
3893    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
3894      .type = ARM_CP_ALIAS,
3895      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
3896      .access = PL3_RW,
3897      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
3898    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
3899      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
3900      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
3901    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
3902      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
3903      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
3904    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
3905      .type = ARM_CP_ALIAS,
3906      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
3907      .access = PL3_RW,
3908      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
3909    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
3910      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
3911      .access = PL3_RW, .writefn = vbar_write,
3912      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
3913      .resetvalue = 0 },
3914    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
3915      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
3916      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
3917      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
3918    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
3919      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
3920      .access = PL3_RW, .resetvalue = 0,
3921      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
3922    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
3923      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
3924      .access = PL3_RW, .type = ARM_CP_CONST,
3925      .resetvalue = 0 },
3926    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
3927      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
3928      .access = PL3_RW, .type = ARM_CP_CONST,
3929      .resetvalue = 0 },
3930    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
3931      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
3932      .access = PL3_RW, .type = ARM_CP_CONST,
3933      .resetvalue = 0 },
3934    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
3935      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
3936      .access = PL3_W, .type = ARM_CP_NO_RAW,
3937      .writefn = tlbi_aa64_alle3is_write },
3938    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
3939      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
3940      .access = PL3_W, .type = ARM_CP_NO_RAW,
3941      .writefn = tlbi_aa64_vae3is_write },
3942    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
3943      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
3944      .access = PL3_W, .type = ARM_CP_NO_RAW,
3945      .writefn = tlbi_aa64_vae3is_write },
3946    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
3947      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
3948      .access = PL3_W, .type = ARM_CP_NO_RAW,
3949      .writefn = tlbi_aa64_alle3_write },
3950    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
3951      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
3952      .access = PL3_W, .type = ARM_CP_NO_RAW,
3953      .writefn = tlbi_aa64_vae3_write },
3954    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
3955      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
3956      .access = PL3_W, .type = ARM_CP_NO_RAW,
3957      .writefn = tlbi_aa64_vae3_write },
3958    REGINFO_SENTINEL
3959};
3960
3961static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3962                                     bool isread)
3963{
3964    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
3965     * but the AArch32 CTR has its own reginfo struct)
3966     */
3967    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
3968        return CP_ACCESS_TRAP;
3969    }
3970    return CP_ACCESS_OK;
3971}
3972
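/* Debug Communications Channel transmit: write the character straight
 * to the host's stdout.
 */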
3973static void dcc_write(CPUARMState *env, const ARMCPRegInfo *ri,
3974                        uint64_t value)
3975{
3976    putchar(value);
3977}
3978
3979static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3980                        uint64_t value)
3981{
3982    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
3983     * read via a bit in OSLSR_EL1.
3984     */
3985    int oslock;
3986
3987    if (ri->state == ARM_CP_STATE_AA32) {
3988        oslock = (value == 0xC5ACCE55);
3989    } else {
3990        oslock = value & 1;
3991    }
3992
3993    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
3994}
3995
3996static const ARMCPRegInfo debug_cp_reginfo[] = {
3997    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
3998     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
3999     * unlike DBGDRAR it is never accessible from EL0.
4000     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
4001     * accessor.
4002     */
4003    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
4004      .access = PL0_R, .accessfn = access_tdra,
4005      .type = ARM_CP_CONST, .resetvalue = 0 },
4006    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
4007      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
4008      .access = PL1_R, .accessfn = access_tdra,
4009      .type = ARM_CP_CONST, .resetvalue = 0 },
4010    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4011      .access = PL0_R, .accessfn = access_tdra,
4012      .type = ARM_CP_CONST, .resetvalue = 0 },
4013    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
4014    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
4015      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4016      .access = PL1_RW, .accessfn = access_tda,
4017      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
4018      .resetvalue = 0 },
4019    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
4020     * We don't implement the configurable EL0 access.
4021     */
4022    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
4023      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4024      .type = ARM_CP_ALIAS,
4025      .access = PL1_R, .accessfn = access_tda,
4026      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
4027    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
4028      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
4029      .access = PL1_W, .type = ARM_CP_NO_RAW,
4030      .accessfn = access_tdosa,
4031      .writefn = oslar_write },
4032    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
4033      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
4034      .access = PL1_R, .resetvalue = 10,
4035      .accessfn = access_tdosa,
4036      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
4037    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
4038    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
4039      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
4040      .access = PL1_RW, .accessfn = access_tdosa,
4041      .type = ARM_CP_NOP },
4042    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
4043     * implement vector catch debug events yet.
4044     */
4045    { .name = "DBGVCR",
4046      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4047      .access = PL1_RW, .accessfn = access_tda,
4048      .type = ARM_CP_NOP },
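        /* The two entries below give a minimal Debug Communications Channel:
         * writes to the transmit data register are funnelled to the host via
         * dcc_write(), and the status register always reads as zero, i.e. the
         * channel never appears busy.
         */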
4049    { .name = "DBGDTRTX_EL0", .state = ARM_CP_STATE_AA64,
4050      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0,
4051      .access = PL0_W, .writefn = dcc_write, .type = ARM_CP_NO_RAW },
4052    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_AA64,
4053      .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 1, .opc2 = 0,
4054      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
4055    REGINFO_SENTINEL
4056};
4057
4058static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
4059    /* 64 bit access versions of the (dummy) debug registers */
4060    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
4061      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4062    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
4063      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4064    REGINFO_SENTINEL
4065};
4066
4067void hw_watchpoint_update(ARMCPU *cpu, int n)
4068{
4069    CPUARMState *env = &cpu->env;
4070    vaddr len = 0;
4071    vaddr wvr = env->cp15.dbgwvr[n];
4072    uint64_t wcr = env->cp15.dbgwcr[n];
4073    int mask;
4074    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
4075
4076    if (env->cpu_watchpoint[n]) {
4077        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
4078        env->cpu_watchpoint[n] = NULL;
4079    }
4080
4081    if (!extract64(wcr, 0, 1)) {
4082        /* E bit clear : watchpoint disabled */
4083        return;
4084    }
4085
4086    switch (extract64(wcr, 3, 2)) {
4087    case 0:
4088        /* LSC 00 is reserved and must behave as if the wp is disabled */
4089        return;
4090    case 1:
4091        flags |= BP_MEM_READ;
4092        break;
4093    case 2:
4094        flags |= BP_MEM_WRITE;
4095        break;
4096    case 3:
4097        flags |= BP_MEM_ACCESS;
4098        break;
4099    }
4100
4101    /* Attempts to use both MASK and BAS fields simultaneously are
4102     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
4103     * thus generating a watchpoint for every byte in the masked region.
4104     */
4105    mask = extract64(wcr, 24, 4);
4106    if (mask == 1 || mask == 2) {
4107        /* Reserved values of MASK; we must act as if the mask value was
4108         * some non-reserved value, or as if the watchpoint were disabled.
4109         * We choose the latter.
4110         */
4111        return;
4112    } else if (mask) {
4113        /* Watchpoint covers an aligned area up to 2GB in size */
4114        len = 1ULL << mask;
4115        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
4116         * whether the watchpoint fires when the unmasked bits match; we opt
4117         * to generate the exceptions.
4118         */
4119        wvr &= ~(len - 1);
4120    } else {
4121        /* Watchpoint covers bytes defined by the byte address select bits */
4122        int bas = extract64(wcr, 5, 8);
4123        int basstart;
4124
4125        if (bas == 0) {
4126            /* This must act as if the watchpoint is disabled */
4127            return;
4128        }
4129
4130        if (extract64(wvr, 2, 1)) {
4131            /* Deprecated case of an address that is only 4-aligned: BAS[7:4]
4132             * are ignored, and BAS[3:0] define which bytes to watch.
4133             */
4134            bas &= 0xf;
4135        }
4136        /* The BAS bits are supposed to be programmed to indicate a contiguous
4137         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
4138         * we fire for each byte in the word/doubleword addressed by the WVR.
4139         * We choose to ignore any non-zero bits after the first range of 1s.
4140         */
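            /* E.g. a BAS of 0b00111100 selects a 4-byte watchpoint starting
             * at wvr + 2: ctz32() gives the offset of the first set bit and
             * cto32() counts the contiguous ones above it.
             */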
4141        basstart = ctz32(bas);
4142        len = cto32(bas >> basstart);
4143        wvr += basstart;
4144    }
4145
4146    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
4147                          &env->cpu_watchpoint[n]);
4148}
4149
4150void hw_watchpoint_update_all(ARMCPU *cpu)
4151{
4152    int i;
4153    CPUARMState *env = &cpu->env;
4154
4155    /* Completely clear out existing QEMU watchpoints and our array, to
4156     * avoid possible stale entries following migration load.
4157     */
4158    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
4159    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
4160
4161    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
4162        hw_watchpoint_update(cpu, i);
4163    }
4164}
4165
4166static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4167                         uint64_t value)
4168{
4169    ARMCPU *cpu = arm_env_get_cpu(env);
4170    int i = ri->crm;
4171
4172    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
4173     * register reads and behaves as if values written are sign extended.
4174     * Bits [1:0] are RES0.
4175     */
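        /* E.g. a write of 0x0001000000000007 is stored as 0xffff000000000004:
         * bit [48] is replicated into bits [63:49] and bits [1:0] are cleared.
         */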
4176    value = sextract64(value, 0, 49) & ~3ULL;
4177
4178    raw_write(env, ri, value);
4179    hw_watchpoint_update(cpu, i);
4180}
4181
4182static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4183                         uint64_t value)
4184{
4185    ARMCPU *cpu = arm_env_get_cpu(env);
4186    int i = ri->crm;
4187
4188    raw_write(env, ri, value);
4189    hw_watchpoint_update(cpu, i);
4190}
4191
4192void hw_breakpoint_update(ARMCPU *cpu, int n)
4193{
4194    CPUARMState *env = &cpu->env;
4195    uint64_t bvr = env->cp15.dbgbvr[n];
4196    uint64_t bcr = env->cp15.dbgbcr[n];
4197    vaddr addr;
4198    int bt;
4199    int flags = BP_CPU;
4200
4201    if (env->cpu_breakpoint[n]) {
4202        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
4203        env->cpu_breakpoint[n] = NULL;
4204    }
4205
4206    if (!extract64(bcr, 0, 1)) {
4207        /* E bit clear : breakpoint disabled */
4208        return;
4209    }
4210
4211    bt = extract64(bcr, 20, 4);
4212
4213    switch (bt) {
4214    case 4: /* unlinked address mismatch (reserved if AArch64) */
4215    case 5: /* linked address mismatch (reserved if AArch64) */
4216        qemu_log_mask(LOG_UNIMP,
4217                      "arm: address mismatch breakpoint types not implemented\n");
4218        return;
4219    case 0: /* unlinked address match */
4220    case 1: /* linked address match */
4221    {
4222        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
4223         * we behave as if the register was sign extended. Bits [1:0] are
4224         * RES0. The BAS field is used to allow setting breakpoints on 16
4225         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
4226         * a bp will fire if the addresses covered by the bp and the addresses
4227         * covered by the insn overlap but the insn doesn't start at the
4228         * start of the bp address range. We choose to require the insn and
4229         * the bp to have the same address. The constraints on writing to
4230         * BAS enforced in dbgbcr_write mean we have only four cases:
4231         *  0b0000  => no breakpoint
4232         *  0b0011  => breakpoint on addr
4233         *  0b1100  => breakpoint on addr + 2
4234         *  0b1111  => breakpoint on addr
4235         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
4236         */
4237        int bas = extract64(bcr, 5, 4);
4238        addr = sextract64(bvr, 0, 49) & ~3ULL;
4239        if (bas == 0) {
4240            return;
4241        }
4242        if (bas == 0xc) {
4243            addr += 2;
4244        }
4245        break;
4246    }
4247    case 2: /* unlinked context ID match */
4248    case 8: /* unlinked VMID match (reserved if no EL2) */
4249    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
4250        qemu_log_mask(LOG_UNIMP,
4251                      "arm: unlinked context breakpoint types not implemented\n");
4252        return;
4253    case 9: /* linked VMID match (reserved if no EL2) */
4254    case 11: /* linked context ID and VMID match (reserved if no EL2) */
4255    case 3: /* linked context ID match */
4256    default:
4257        /* We must generate no events for Linked context matches (unless
4258         * they are linked to by some other bp/wp, which is handled in
4259         * updates for the linking bp/wp). We choose to also generate no events
4260         * for reserved values.
4261         */
4262        return;
4263    }
4264
4265    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
4266}
4267
4268void hw_breakpoint_update_all(ARMCPU *cpu)
4269{
4270    int i;
4271    CPUARMState *env = &cpu->env;
4272
4273    /* Completely clear out existing QEMU breakpoints and our array, to
4274     * avoid possible stale entries following migration load.
4275     */
4276    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
4277    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
4278
4279    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
4280        hw_breakpoint_update(cpu, i);
4281    }
4282}
4283
4284static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4285                         uint64_t value)
4286{
4287    ARMCPU *cpu = arm_env_get_cpu(env);
4288    int i = ri->crm;
4289
4290    raw_write(env, ri, value);
4291    hw_breakpoint_update(cpu, i);
4292}
4293
4294static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4295                         uint64_t value)
4296{
4297    ARMCPU *cpu = arm_env_get_cpu(env);
4298    int i = ri->crm;
4299
4300    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
4301     * copy of BAS[0].
4302     */
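        /* BAS occupies bits [8:5], so e.g. an attempted write of BAS = 0b0100
         * is stored as 0b1100, which hw_breakpoint_update treats as a
         * breakpoint on addr + 2.
         */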
4303    value = deposit64(value, 6, 1, extract64(value, 5, 1));
4304    value = deposit64(value, 8, 1, extract64(value, 7, 1));
4305
4306    raw_write(env, ri, value);
4307    hw_breakpoint_update(cpu, i);
4308}
4309
4310static void define_debug_regs(ARMCPU *cpu)
4311{
4312    /* Define v7 and v8 architectural debug registers.
4313     * These are just dummy implementations for now.
4314     */
4315    int i;
4316    int wrps, brps, ctx_cmps;
4317    ARMCPRegInfo dbgdidr = {
4318        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
4319        .access = PL0_R, .accessfn = access_tda,
4320        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
4321    };
4322
4323    /* Note that all these register fields hold "number of Xs minus 1". */
4324    brps = extract32(cpu->dbgdidr, 24, 4);
4325    wrps = extract32(cpu->dbgdidr, 28, 4);
4326    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
4327
4328    assert(ctx_cmps <= brps);
4329
4330    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
4331     * of the debug registers such as number of breakpoints;
4332     * check that if they both exist then they agree.
4333     */
4334    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
4335        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
4336        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
4337        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
4338    }
4339
4340    define_one_arm_cp_reg(cpu, &dbgdidr);
4341    define_arm_cp_regs(cpu, debug_cp_reginfo);
4342
4343    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
4344        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
4345    }
4346
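    /* brps and wrps still hold "number of Xs minus 1", so we define one more
     * breakpoint and watchpoint register pair than the raw field values.
     */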
4347    for (i = 0; i < brps + 1; i++) {
4348        ARMCPRegInfo dbgregs[] = {
4349            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
4350              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
4351              .access = PL1_RW, .accessfn = access_tda,
4352              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
4353              .writefn = dbgbvr_write, .raw_writefn = raw_write
4354            },
4355            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
4356              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
4357              .access = PL1_RW, .accessfn = access_tda,
4358              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
4359              .writefn = dbgbcr_write, .raw_writefn = raw_write
4360            },
4361            REGINFO_SENTINEL
4362        };
4363        define_arm_cp_regs(cpu, dbgregs);
4364    }
4365
4366    for (i = 0; i < wrps + 1; i++) {
4367        ARMCPRegInfo dbgregs[] = {
4368            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
4369              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
4370              .access = PL1_RW, .accessfn = access_tda,
4371              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
4372              .writefn = dbgwvr_write, .raw_writefn = raw_write
4373            },
4374            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
4375              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
4376              .access = PL1_RW, .accessfn = access_tda,
4377              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
4378              .writefn = dbgwcr_write, .raw_writefn = raw_write
4379            },
4380            REGINFO_SENTINEL
4381        };
4382        define_arm_cp_regs(cpu, dbgregs);
4383    }
4384}
4385
4386void register_cp_regs_for_features(ARMCPU *cpu)
4387{
4388    /* Register all the coprocessor registers based on feature bits */
4389    CPUARMState *env = &cpu->env;
4390    if (arm_feature(env, ARM_FEATURE_M)) {
4391        /* M profile has no coprocessor registers */
4392        return;
4393    }
4394
4395    define_arm_cp_regs(cpu, cp_reginfo);
4396    if (!arm_feature(env, ARM_FEATURE_V8)) {
4397        /* Must go early as it is full of wildcards that may be
4398         * overridden by later definitions.
4399         */
4400        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
4401    }
4402
4403    if (arm_feature(env, ARM_FEATURE_V6)) {
4404        /* The ID registers all have impdef reset values */
4405        ARMCPRegInfo v6_idregs[] = {
4406            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
4407              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4408              .access = PL1_R, .type = ARM_CP_CONST,
4409              .resetvalue = cpu->id_pfr0 },
4410            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
4411              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
4412              .access = PL1_R, .type = ARM_CP_CONST,
4413              .resetvalue = cpu->id_pfr1 },
4414            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
4415              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
4416              .access = PL1_R, .type = ARM_CP_CONST,
4417              .resetvalue = cpu->id_dfr0 },
4418            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
4419              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
4420              .access = PL1_R, .type = ARM_CP_CONST,
4421              .resetvalue = cpu->id_afr0 },
4422            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
4423              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
4424              .access = PL1_R, .type = ARM_CP_CONST,
4425              .resetvalue = cpu->id_mmfr0 },
4426            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
4427              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
4428              .access = PL1_R, .type = ARM_CP_CONST,
4429              .resetvalue = cpu->id_mmfr1 },
4430            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
4431              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
4432              .access = PL1_R, .type = ARM_CP_CONST,
4433              .resetvalue = cpu->id_mmfr2 },
4434            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
4435              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
4436              .access = PL1_R, .type = ARM_CP_CONST,
4437              .resetvalue = cpu->id_mmfr3 },
4438            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
4439              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4440              .access = PL1_R, .type = ARM_CP_CONST,
4441              .resetvalue = cpu->id_isar0 },
4442            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
4443              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
4444              .access = PL1_R, .type = ARM_CP_CONST,
4445              .resetvalue = cpu->id_isar1 },
4446            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
4447              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4448              .access = PL1_R, .type = ARM_CP_CONST,
4449              .resetvalue = cpu->id_isar2 },
4450            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
4451              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
4452              .access = PL1_R, .type = ARM_CP_CONST,
4453              .resetvalue = cpu->id_isar3 },
4454            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
4455              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
4456              .access = PL1_R, .type = ARM_CP_CONST,
4457              .resetvalue = cpu->id_isar4 },
4458            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
4459              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
4460              .access = PL1_R, .type = ARM_CP_CONST,
4461              .resetvalue = cpu->id_isar5 },
4462            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
4463              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
4464              .access = PL1_R, .type = ARM_CP_CONST,
4465              .resetvalue = cpu->id_mmfr4 },
4466            /* 7 is as yet unallocated and must RAZ */
4467            { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH,
4468              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
4469              .access = PL1_R, .type = ARM_CP_CONST,
4470              .resetvalue = 0 },
4471            REGINFO_SENTINEL
4472        };
4473        define_arm_cp_regs(cpu, v6_idregs);
4474        define_arm_cp_regs(cpu, v6_cp_reginfo);
4475    } else {
4476        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
4477    }
4478    if (arm_feature(env, ARM_FEATURE_V6K)) {
4479        define_arm_cp_regs(cpu, v6k_cp_reginfo);
4480    }
4481    if (arm_feature(env, ARM_FEATURE_V7MP) &&
4482        !arm_feature(env, ARM_FEATURE_MPU)) {
4483        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
4484    }
4485    if (arm_feature(env, ARM_FEATURE_V7)) {
4486        /* v7 performance monitor control register: same implementor
4487         * field as main ID register, and we implement only the cycle
4488         * count register.
4489         */
4490#ifndef CONFIG_USER_ONLY
4491        ARMCPRegInfo pmcr = {
4492            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
4493            .access = PL0_RW,
4494            .type = ARM_CP_IO | ARM_CP_ALIAS,
4495            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
4496            .accessfn = pmreg_access, .writefn = pmcr_write,
4497            .raw_writefn = raw_write,
4498        };
4499        ARMCPRegInfo pmcr64 = {
4500            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
4501            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
4502            .access = PL0_RW, .accessfn = pmreg_access,
4503            .type = ARM_CP_IO,
4504            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
4505            .resetvalue = cpu->midr & 0xff000000,
4506            .writefn = pmcr_write, .raw_writefn = raw_write,
4507        };
4508        define_one_arm_cp_reg(cpu, &pmcr);
4509        define_one_arm_cp_reg(cpu, &pmcr64);
4510#endif
4511        ARMCPRegInfo clidr = {
4512            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
4513            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
4514            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
4515        };
4516        define_one_arm_cp_reg(cpu, &clidr);
4517        define_arm_cp_regs(cpu, v7_cp_reginfo);
4518        define_debug_regs(cpu);
4519    } else {
4520        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
4521    }
4522    if (arm_feature(env, ARM_FEATURE_V8)) {
4523        /* AArch64 ID registers, which all have impdef reset values.
4524         * Note that within the ID register ranges the unused slots
4525         * must all RAZ, not UNDEF; future architecture versions may
4526         * define new registers here.
4527         */
4528        ARMCPRegInfo v8_idregs[] = {
4529            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
4530              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
4531              .access = PL1_R, .type = ARM_CP_CONST,
4532              .resetvalue = cpu->id_aa64pfr0 },
4533            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
4534              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
4535              .access = PL1_R, .type = ARM_CP_CONST,
4536              .resetvalue = cpu->id_aa64pfr1},
4537            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4538              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
4539              .access = PL1_R, .type = ARM_CP_CONST,
4540              .resetvalue = 0 },
4541            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4542              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
4543              .access = PL1_R, .type = ARM_CP_CONST,
4544              .resetvalue = 0 },
4545            { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4546              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
4547              .access = PL1_R, .type = ARM_CP_CONST,
4548              .resetvalue = 0 },
4549            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4550              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
4551              .access = PL1_R, .type = ARM_CP_CONST,
4552              .resetvalue = 0 },
4553            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4554              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
4555              .access = PL1_R, .type = ARM_CP_CONST,
4556              .resetvalue = 0 },
4557            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4558              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
4559              .access = PL1_R, .type = ARM_CP_CONST,
4560              .resetvalue = 0 },
4561            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
4562              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
4563              .access = PL1_R, .type = ARM_CP_CONST,
4564              /* We mask out the PMUVer field, because we don't currently
4565               * implement the PMU. Not advertising it prevents the guest
4566               * from trying to use it and getting UNDEFs on registers we
4567               * don't implement.
4568               */
4569              .resetvalue = cpu->id_aa64dfr0 & ~0xf00 },
4570            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
4571              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
4572              .access = PL1_R, .type = ARM_CP_CONST,
4573              .resetvalue = cpu->id_aa64dfr1 },
4574            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4575              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
4576              .access = PL1_R, .type = ARM_CP_CONST,
4577              .resetvalue = 0 },
4578            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4579              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
4580              .access = PL1_R, .type = ARM_CP_CONST,
4581              .resetvalue = 0 },
4582            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
4583              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
4584              .access = PL1_R, .type = ARM_CP_CONST,
4585              .resetvalue = cpu->id_aa64afr0 },
4586            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
4587              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
4588              .access = PL1_R, .type = ARM_CP_CONST,
4589              .resetvalue = cpu->id_aa64afr1 },
4590            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4591              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
4592              .access = PL1_R, .type = ARM_CP_CONST,
4593              .resetvalue = 0 },
4594            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4595              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
4596              .access = PL1_R, .type = ARM_CP_CONST,
4597              .resetvalue = 0 },
4598            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
4599              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
4600              .access = PL1_R, .type = ARM_CP_CONST,
4601              .resetvalue = cpu->id_aa64isar0 },
4602            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
4603              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
4604              .access = PL1_R, .type = ARM_CP_CONST,
4605              .resetvalue = cpu->id_aa64isar1 },
4606            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4607              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
4608              .access = PL1_R, .type = ARM_CP_CONST,
4609              .resetvalue = 0 },
4610            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4611              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
4612              .access = PL1_R, .type = ARM_CP_CONST,
4613              .resetvalue = 0 },
4614            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4615              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
4616              .access = PL1_R, .type = ARM_CP_CONST,
4617              .resetvalue = 0 },
4618            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4619              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
4620              .access = PL1_R, .type = ARM_CP_CONST,
4621              .resetvalue = 0 },
4622            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4623              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
4624              .access = PL1_R, .type = ARM_CP_CONST,
4625              .resetvalue = 0 },
4626            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4627              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
4628              .access = PL1_R, .type = ARM_CP_CONST,
4629              .resetvalue = 0 },
4630            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
4631              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4632              .access = PL1_R, .type = ARM_CP_CONST,
4633              .resetvalue = cpu->id_aa64mmfr0 },
4634            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
4635              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
4636              .access = PL1_R, .type = ARM_CP_CONST,
4637              .resetvalue = cpu->id_aa64mmfr1 },
4638            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4639              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
4640              .access = PL1_R, .type = ARM_CP_CONST,
4641              .resetvalue = 0 },
4642            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4643              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
4644              .access = PL1_R, .type = ARM_CP_CONST,
4645              .resetvalue = 0 },
4646            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4647              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
4648              .access = PL1_R, .type = ARM_CP_CONST,
4649              .resetvalue = 0 },
4650            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4651              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
4652              .access = PL1_R, .type = ARM_CP_CONST,
4653              .resetvalue = 0 },
4654            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4655              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
4656              .access = PL1_R, .type = ARM_CP_CONST,
4657              .resetvalue = 0 },
4658            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4659              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
4660              .access = PL1_R, .type = ARM_CP_CONST,
4661              .resetvalue = 0 },
4662            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
4663              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
4664              .access = PL1_R, .type = ARM_CP_CONST,
4665              .resetvalue = cpu->mvfr0 },
4666            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
4667              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
4668              .access = PL1_R, .type = ARM_CP_CONST,
4669              .resetvalue = cpu->mvfr1 },
4670            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
4671              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
4672              .access = PL1_R, .type = ARM_CP_CONST,
4673              .resetvalue = cpu->mvfr2 },
4674            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4675              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
4676              .access = PL1_R, .type = ARM_CP_CONST,
4677              .resetvalue = 0 },
4678            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4679              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
4680              .access = PL1_R, .type = ARM_CP_CONST,
4681              .resetvalue = 0 },
4682            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4683              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
4684              .access = PL1_R, .type = ARM_CP_CONST,
4685              .resetvalue = 0 },
4686            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4687              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
4688              .access = PL1_R, .type = ARM_CP_CONST,
4689              .resetvalue = 0 },
4690            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4691              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
4692              .access = PL1_R, .type = ARM_CP_CONST,
4693              .resetvalue = 0 },
4694            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
4695              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
4696              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4697              .resetvalue = cpu->pmceid0 },
4698            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
4699              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
4700              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4701              .resetvalue = cpu->pmceid0 },
4702            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
4703              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
4704              .access =