qemu/target/arm/helper.c
#include "qemu/osdep.h"
#include "trace.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
#include "sysemu/kvm.h"

#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

#ifndef CONFIG_USER_ONLY
static bool get_phys_addr(CPUARMState *env, target_ulong address,
                          int access_type, ARMMMUIdx mmu_idx,
                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                          target_ulong *page_size, uint32_t *fsr,
                          ARMMMUFaultInfo *fi);

static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                               int access_type, ARMMMUIdx mmu_idx,
                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                               target_ulong *page_size_ptr, uint32_t *fsr,
                               ARMMMUFaultInfo *fi);

/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD   0x8 /* PMCR.D: cycle count divider (1/64) */
#define PMCRC   0x4 /* PMCR.C: cycle counter reset */
#define PMCRE   0x1 /* PMCR.E: counter enable */
#endif

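/* GDB register map for the two VFP accessors below: D0..D15 (D0..D31
 * with VFP3) as 8-byte little-endian values, then Q0..Q15 aliases as
 * 16-byte values when NEON is present, then FPSID, FPSCR and FPEXC as
 * 4-byte values. The return value is the number of bytes consumed,
 * or 0 for an unknown register number.
 */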
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

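/* For AArch64, each 128-bit V register is stored as two consecutive
 * 64-bit elements of vfp.regs[], least-significant half first; the two
 * accessors below transfer both halves plus the FPSR and FPCR views.
 */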
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    assert(ri->fieldoffset);
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return (char *)env + ri->fieldoffset;
}

uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
{
    /* Return true if the regdef would cause an assertion if you called
     * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
     * program bug for it not to have the NO_RAW flag).
     * NB that returning false here doesn't necessarily mean that calling
     * read/write_raw_cp_reg() is safe, because we can't distinguish "has
     * read/write access functions which are safe for raw use" from "has
     * read/write access functions which have side effects but has forgotten
     * to provide raw access functions".
     * The tests here line up with the conditions in read/write_raw_cp_reg()
     * and assertions in raw_read()/raw_write().
     */
    if ((ri->type & ARM_CP_CONST) ||
        ri->fieldoffset ||
        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
        return false;
    }
    return true;
}

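/* The next two functions keep the (index,value) list in cpu->cpreg_indexes
 * and cpu->cpreg_values (used for migration and for syncing with KVM)
 * coherent with the register state in cpu->env, in each direction.
 * Both return false if any register could not be synced;
 * write_list_to_cpustate() also reads each value back so that read-only
 * and partially read-only registers are detected as failures.
 */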
bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_RAW) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

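/* Two-pass setup: count the raw-accessible registers, size the
 * index/value arrays from that count, then fill the index array in
 * sorted key order and assert that the two passes agreed.
 */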
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys;
    int arraylen;

    keys = g_hash_table_get_keys(cpu->cp_regs);
    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

/*
 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 *
 * access_el3_aa32ns: Used to check AArch32 register views.
 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 */
static CPAccessResult access_el3_aa32ns(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    bool secure = arm_is_secure_below_el3(env);

    assert(!arm_el_is_aa64(env, 3));
    if (secure) {
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
                                                const ARMCPRegInfo *ri,
                                                bool isread)
{
    if (!arm_el_is_aa64(env, 3)) {
        return access_el3_aa32ns(env, ri, isread);
    }
    return CP_ACCESS_OK;
}

/* Some secure-only AArch32 registers trap to EL3 if used from
 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 * We assume that the .access field is set to PL1_RW.
 */
static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            bool isread)
{
    if (arm_current_el(env) == 3) {
        return CP_ACCESS_OK;
    }
    if (arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL3;
    }
    /* This will be EL1 NS and EL2 NS, which just UNDEF */
    return CP_ACCESS_TRAP_UNCATEGORIZED;
}

/* Check for traps to "powerdown debug" registers, which are controlled
 * by MDCR.TDOSA
 */
static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to "debug ROM" registers, which are controlled
 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to general debug registers, which are controlled
 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 */
static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

/* Check for traps to performance monitor registers, which are controlled
 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 */
static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
                                 bool isread)
{
    int el = arm_current_el(env);

    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }
    return CP_ACCESS_OK;
}

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    raw_write(env, ri, value);
    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(CPU(cpu));
        raw_write(env, ri, value);
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
        && !extended_addresses_enabled(env)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(CPU(cpu));
    }
    raw_write(env, ri, value);
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush(CPU(cpu));
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_all_cpus_synced(cs);
}

static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
}

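/* The TLBIALLNSNH ops below flush the whole non-secure EL1&0
 * translation regime: the combined stage 1+2 MMU indexes (S12NSE0,
 * S12NSE1) as well as the stage-2-only index (S2NS).
 */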
static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs,
                        ARMMMUIdxBit_S12NSE1 |
                        ARMMMUIdxBit_S12NSE0 |
                        ARMMMUIdxBit_S2NS);
}

static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs,
                                        ARMMMUIdxBit_S12NSE1 |
                                        ARMMMUIdxBit_S12NSE0 |
                                        ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Invalidate by IPA. This has to invalidate any structures that
     * contain only stage 2 translation information, but does not need
     * to apply to structures that contain combined stage 1 and stage 2
     * translation information.
     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
     */
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
}

static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr;

    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
        return;
    }

    pageaddr = sextract64(value << 12, 0, 40);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S2NS);
}

static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
}

static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);

    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
}

static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);

    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
                                             ARMMMUIdxBit_S1E2);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* Define the secure and non-secure FCSE identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated. There is also no
     * v8 EL1 version of the register so the non-secure instance stands alone.
     */
    { .name = "FCSEIDR(NS)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR(S)",
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Define the secure and non-secure context identifier CP registers
     * separately because there is no secure bank in V8 (no _EL3).  This allows
     * the secure register to be properly reset and migrated.  In the
     * non-secure case, the 32-bit register will have reset and migration
     * disabled during registration as it is handled by the 64-bit instance.
     */
    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v8_cp_reginfo[] = {
    /* NB: Some of these registers exist in v8 but with more precise
     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
     */
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR",
      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .writefn = dacr_write, .raw_writefn = raw_write,
      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
     * For v6 and v5, these mappings are overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
      .resetvalue = 0 },
    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
     * implementing it as RAZ means the "debug architecture version" bits
     * will read as a reserved value, which should cause Linux to not try
     * to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_RAW },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_RAW },
    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    uint32_t mask = 0;

    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
    if (!arm_feature(env, ARM_FEATURE_V8)) {
        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
         */
        if (arm_feature(env, ARM_FEATURE_VFP)) {
            /* VFP coprocessor: cp10 & cp11 [23:20] */
            mask |= (1 << 31) | (1 << 30) | (0xf << 20);

            if (!arm_feature(env, ARM_FEATURE_NEON)) {
                /* ASEDIS [31] bit is RAO/WI */
                value |= (1 << 31);
            }

            /* VFPv3 and upwards with NEON implement 32 double precision
             * registers (D0-D31).
             */
            if (!arm_feature(env, ARM_FEATURE_NEON) ||
                    !arm_feature(env, ARM_FEATURE_VFP3)) {
                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
                value |= (1 << 30);
            }
        }
        value &= mask;
    }
    env->cp15.cpacr_el1 = value;
}
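
/* Worked example of the masking above: on a v7 core with VFP but no
 * NEON, a write of 0 reads back as 0xC0000000, because ASEDIS [31] and
 * D32DIS [30] are RAO/WI and all bits outside the cp10/cp11 enable
 * field [23:20] are masked to zero.
 */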

static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* Check if CPACR accesses are to be trapped to EL2 */
        if (arm_current_el(env) == 1 &&
            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
            return CP_ACCESS_TRAP_EL2;
        /* Check if CPACR accesses are to be trapped to EL3 */
        } else if (arm_current_el(env) < 3 &&
                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
            return CP_ACCESS_TRAP_EL3;
        }
    }

    return CP_ACCESS_OK;
}

static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                  bool isread)
{
    /* Check if CPTR accesses are set to trap to EL3 */
    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* We need to break the TB after ISB to execute self-modifying code
     * correctly and also to take any pending interrupts immediately.
     * So we use the arm_cp_write_ignore() function instead of the
     * ARM_CP_NOP flag.
     */
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
                             offsetof(CPUARMState, cp15.ifar_ns) },
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register: should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                   bool isread)
{
    /* User accessibility of the performance monitor registers is
     * controlled by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow
     * configurable trapping to EL2 or EL3 for other accesses.
     */
    int el = arm_current_el(env);

    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
        return CP_ACCESS_TRAP;
    }
    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
        && !arm_is_secure_below_el3(env)) {
        return CP_ACCESS_TRAP_EL2;
    }
    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
        return CP_ACCESS_TRAP_EL3;
    }

    return CP_ACCESS_OK;
}

static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
                                           const ARMCPRegInfo *ri,
                                           bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_swinc(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* SW: software increment write trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
        && !isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

#ifndef CONFIG_USER_ONLY

static CPAccessResult pmreg_access_selr(CPUARMState *env,
                                        const ARMCPRegInfo *ri,
                                        bool isread)
{
    /* ER: event counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
{
    /* CR: cycle counter read trap control */
    if (arm_feature(env, ARM_FEATURE_V8)
        && arm_current_el(env) == 0
        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
        && isread) {
        return CP_ACCESS_OK;
    }

    return pmreg_access(env, ri, isread);
}

static inline bool arm_ccnt_enabled(CPUARMState *env)
{
    /* This does not support checking PMCCFILTR_EL0 register */

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        return false;
    }

    return true;
}

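/* Note on the scheme used below: while the cycle counter is enabled,
 * c15_ccnt holds the difference between the current tick count and the
 * guest-visible counter value, so a read returns ticks - c15_ccnt.
 * pmccntr_sync() converts between that delta form and the plain
 * counter value; callers bracket changes to the counting state
 * (e.g. pmcr_write) with a sync on each side.
 */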
void pmccntr_sync(CPUARMState *env)
{
    uint64_t temp_ticks;

    temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        temp_ticks /= 64;
    }

    if (arm_ccnt_enabled(env)) {
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    pmccntr_sync(env);

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* Only the DP [5], X [4], D [3] and E [0] bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    pmccntr_sync(env);
}

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
     * PMXEVCNTR. We allow any value [0..31] to be written to PMSELR
     * here, and check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
     * actually accessed.
     */
    env->cp15.c9_pmselr = value & 0x1f;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint64_t total_ticks;

    if (!arm_ccnt_enabled(env)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}

static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    uint64_t cur_val = pmccntr_read(env, NULL);

    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
}

#else /* CONFIG_USER_ONLY */

void pmccntr_sync(CPUARMState *env)
{
}

#endif

static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    pmccntr_sync(env);
    env->cp15.pmccfiltr_el0 = value & 0x7E000000;
    pmccntr_sync(env);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
     * PMSELR value is equal to or greater than the number of implemented
     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        pmccfiltr_write(env, ri, value);
    }
}

static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
     * are CONSTRAINED UNPREDICTABLE. See comments in pmxevtyper_write().
     */
    if (env->cp15.c9_pmselr == 0x1f) {
        return env->cp15.pmccfiltr_el0;
    } else {
        return 0;
    }
}

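/* PMUSERENR bit assignments: EN is bit [0]; v8 adds SW [1], CR [2]
 * and ER [3], which the pmreg_access_* helpers above check
 * individually.
 */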
static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_V8)) {
        env->cp15.c9_pmuserenr = value & 0xf;
    } else {
        env->cp15.c9_pmuserenr = value & 1;
    }
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1 << 31);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    raw_write(env, ri, value & ~0x1FULL);
}

static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    /* We only mask off bits that are RES0 both for AArch64 and AArch32.
     * For bits that vary between AArch32/64, code needs to check the
     * current execution mode before directly using the feature bit.
     */
    uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        valid_mask &= ~SCR_HCE;

        /* On ARMv7, SMD (or SCD as it is called in v7) is only
         * supported if EL2 exists. The bit is UNK/SBZP when
         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
         * when EL2 is unavailable.
         * On ARMv8, this bit is always available.
         */
        if (arm_feature(env, ARM_FEATURE_V7) &&
            !arm_feature(env, ARM_FEATURE_V8)) {
            valid_mask &= ~SCR_SMD;
        }
    }

    /* Clear all-context RES0 bits.  */
    value &= valid_mask;
    raw_write(env, ri, value);
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
     * bank
     */
    uint32_t index = A32_BANKED_REG_GET(env, csselr,
                                        ri->secure & ARM_CP_SECSTATE_S);

    return cpu->ccsidr[index];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    raw_write(env, ri, value & 0xf);
}

static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ret = 0;

    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
        ret |= CPSR_I;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
        ret |= CPSR_F;
    }
    /* External aborts are not possible in QEMU so A bit is always clear */
    return ret;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
      .access = PL0_RW, .accessfn = pmreg_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
      .writefn = pmcntenset_write, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_ALIAS },
    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenclr_write },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access_swinc, .type = ARM_CP_NOP },
#ifndef CONFIG_USER_ONLY
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
      .raw_writefn = raw_write},
    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
      .access = PL0_RW, .accessfn = pmreg_access_selr,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
      .writefn = pmselr_write, .raw_writefn = raw_write, },
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write32,
      .accessfn = pmreg_access_ccntr },
    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
      .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write, },
#endif
    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
      .writefn = pmccfiltr_write,
      .access = PL0_RW, .accessfn = pmreg_access,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
      .resetvalue = 0, },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
      .access = PL0_RW, .type = ARM_CP_NO_RAW, .accessfn = pmreg_access,
      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access_xevcntr },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_ALIAS,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
      .access = PL1_RW, .accessfn = access_tpm,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenset_write, .raw_writefn = raw_write,
      .resetvalue = 0x0 },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write, },
    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
      .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .writefn = pmintenclr_write },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                             offsetof(CPUARMState, cp15.csselr_ns) } },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Auxiliary fault status registers: these also are IMPDEF, and we
     * choose to RAZ/WI for all cores.
     */
    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
      .resetvalue = 0 },
    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     */
    /* MAIR0/1 are defined separately from their 64-bit counterpart which
     * allows them to assign the correct fieldoffset based on the endianness
     * handled in the field definitions.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
                             offsetof(CPUARMState, cp15.mair0_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
                             offsetof(CPUARMState, cp15.mair1_ns) },
      .resetfn = arm_cp_reset_ignore },
    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
    /* 32 bit ITLB invalidates */
    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit DTLB invalidates */
    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    /* 32 bit TLB invalidates */
    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v7mp_cp_reginfo[] = {
    /* 32 bit TLB invalidates, Inner Shareable */
    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbiasid_is_write },
    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .type = ARM_CP_NO_RAW, .access = PL1_W,
      .writefn = tlbimvaa_is_write },
    REGINFO_SENTINEL
};

1446static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1447                        uint64_t value)
1448{
1449    value &= 1;
1450    env->teecr = value;
1451}
1452
1453static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1454                                    bool isread)
1455{
1456    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
1457        return CP_ACCESS_TRAP;
1458    }
1459    return CP_ACCESS_OK;
1460}
1461
1462static const ARMCPRegInfo t2ee_cp_reginfo[] = {
1463    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
1464      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
1465      .resetvalue = 0,
1466      .writefn = teecr_write },
1467    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
1468      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
1469      .accessfn = teehbr_access, .resetvalue = 0 },
1470    REGINFO_SENTINEL
1471};
1472
1473static const ARMCPRegInfo v6k_cp_reginfo[] = {
1474    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
1475      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
1476      .access = PL0_RW,
1477      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
1478    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
1479      .access = PL0_RW,
1480      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
1481                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
1482      .resetfn = arm_cp_reset_ignore },
1483    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
1484      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
1485      .access = PL0_R|PL1_W,
1486      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
1487      .resetvalue = 0},
1488    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
1489      .access = PL0_R|PL1_W,
1490      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
1491                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
1492      .resetfn = arm_cp_reset_ignore },
1493    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
1494      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
1495      .access = PL1_RW,
1496      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
1497    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
1498      .access = PL1_RW,
1499      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
1500                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
1501      .resetvalue = 0 },
1502    REGINFO_SENTINEL
1503};
1504
1505#ifndef CONFIG_USER_ONLY
1506
1507static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
1508                                       bool isread)
1509{
1510    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
1511     * Writable only at the highest implemented exception level.
1512     */
1513    int el = arm_current_el(env);
1514
1515    switch (el) {
1516    case 0:
1517        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
1518            return CP_ACCESS_TRAP;
1519        }
1520        break;
1521    case 1:
1522        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
1523            arm_is_secure_below_el3(env)) {
1524            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
1525            return CP_ACCESS_TRAP_UNCATEGORIZED;
1526        }
1527        break;
1528    case 2:
1529    case 3:
1530        break;
1531    }
1532
1533    if (!isread && el < arm_highest_el(env)) {
1534        return CP_ACCESS_TRAP_UNCATEGORIZED;
1535    }
1536
1537    return CP_ACCESS_OK;
1538}
1539
1540static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
1541                                        bool isread)
1542{
1543    unsigned int cur_el = arm_current_el(env);
1544    bool secure = arm_is_secure(env);
1545
1546    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
1547    if (cur_el == 0 &&
1548        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
1549        return CP_ACCESS_TRAP;
1550    }
1551
1552    if (arm_feature(env, ARM_FEATURE_EL2) &&
1553        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1554        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
1555        return CP_ACCESS_TRAP_EL2;
1556    }
1557    return CP_ACCESS_OK;
1558}
1559
1560static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
1561                                      bool isread)
1562{
1563    unsigned int cur_el = arm_current_el(env);
1564    bool secure = arm_is_secure(env);
1565
1566    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
1567     * EL0[PV]TEN is zero.
1568     */
1569    if (cur_el == 0 &&
1570        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
1571        return CP_ACCESS_TRAP;
1572    }
1573
1574    if (arm_feature(env, ARM_FEATURE_EL2) &&
1575        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
1576        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
1577        return CP_ACCESS_TRAP_EL2;
1578    }
1579    return CP_ACCESS_OK;
1580}
1581
1582static CPAccessResult gt_pct_access(CPUARMState *env,
1583                                    const ARMCPRegInfo *ri,
1584                                    bool isread)
1585{
1586    return gt_counter_access(env, GTIMER_PHYS, isread);
1587}
1588
1589static CPAccessResult gt_vct_access(CPUARMState *env,
1590                                    const ARMCPRegInfo *ri,
1591                                    bool isread)
1592{
1593    return gt_counter_access(env, GTIMER_VIRT, isread);
1594}
1595
1596static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1597                                       bool isread)
1598{
1599    return gt_timer_access(env, GTIMER_PHYS, isread);
1600}
1601
1602static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
1603                                       bool isread)
1604{
1605    return gt_timer_access(env, GTIMER_VIRT, isread);
1606}
1607
1608static CPAccessResult gt_stimer_access(CPUARMState *env,
1609                                       const ARMCPRegInfo *ri,
1610                                       bool isread)
1611{
1612    /* The AArch64 register view of the secure physical timer is
1613     * always accessible from EL3, and configurably accessible from
1614     * Secure EL1.
1615     */
1616    switch (arm_current_el(env)) {
1617    case 1:
1618        if (!arm_is_secure(env)) {
1619            return CP_ACCESS_TRAP;
1620        }
1621        if (!(env->cp15.scr_el3 & SCR_ST)) {
1622            return CP_ACCESS_TRAP_EL3;
1623        }
1624        return CP_ACCESS_OK;
1625    case 0:
1626    case 2:
1627        return CP_ACCESS_TRAP;
1628    case 3:
1629        return CP_ACCESS_OK;
1630    default:
1631        g_assert_not_reached();
1632    }
1633}
1634
1635static uint64_t gt_get_countervalue(CPUARMState *env)
1636{
1637    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
1638}
1639
1640static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
1641{
1642    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
1643
1644    if (gt->ctl & 1) {
1645        /* Timer enabled: calculate and set current ISTATUS, irq, and
1646         * reset timer to when ISTATUS next has to change
1647         */
1648        uint64_t offset = timeridx == GTIMER_VIRT ?
1649                                      cpu->env.cp15.cntvoff_el2 : 0;
1650        uint64_t count = gt_get_countervalue(&cpu->env);
1651        /* Note that this must be unsigned 64 bit arithmetic: */
1652        int istatus = count - offset >= gt->cval;
1653        uint64_t nexttick;
1654        int irqstate;
1655
1656        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
1657
1658        irqstate = (istatus && !(gt->ctl & 2));
1659        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1660
1661        if (istatus) {
1662            /* Next transition is when count rolls back over to zero */
1663            nexttick = UINT64_MAX;
1664        } else {
1665            /* Next transition is when we hit cval */
1666            nexttick = gt->cval + offset;
1667        }
1668        /* Note that the desired next expiry time might be beyond the
1669         * signed-64-bit range of a QEMUTimer -- in this case we just
1670         * set the timer for as far in the future as possible. When the
1671         * timer expires we will reset the timer for any remaining period.
1672         */
1673        if (nexttick > INT64_MAX / GTIMER_SCALE) {
1674            nexttick = INT64_MAX / GTIMER_SCALE;
1675        }
1676        timer_mod(cpu->gt_timer[timeridx], nexttick * GTIMER_SCALE);
1677        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
1678    } else {
1679        /* Timer disabled: ISTATUS and timer output always clear */
1680        gt->ctl &= ~4;
1681        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
1682        timer_del(cpu->gt_timer[timeridx]);
1683        trace_arm_gt_recalc_disabled(timeridx);
1684    }
1685}
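
/* A minimal worked example of the tick/ns arithmetic above -- a hedged
 * sketch, not part of the original file. nexttick is in counter ticks
 * while QEMUTimer deadlines are in nanoseconds, which is why the clamp
 * against INT64_MAX / GTIMER_SCALE precedes the multiply in timer_mod().
 * The asserts assume GTIMER_SCALE == 16 (the 62.5 MHz fixed frequency).
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>
#include <assert.h>

static int64_t ticks_to_deadline_ns(uint64_t nexttick)
{
    const uint64_t scale = 16; /* GTIMER_SCALE */

    if (nexttick > INT64_MAX / scale) {
        nexttick = INT64_MAX / scale; /* saturate; re-arm on expiry */
    }
    return (int64_t)(nexttick * scale); /* guaranteed <= INT64_MAX */
}

static void demo(void)
{
    assert(ticks_to_deadline_ns(100) == 1600);
    /* Saturation: huge tick counts clamp to the largest multiple of 16 */
    assert(ticks_to_deadline_ns(UINT64_MAX) == (INT64_MAX / 16) * 16);
}
#endif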
1686
1687static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
1688                           int timeridx)
1689{
1690    ARMCPU *cpu = arm_env_get_cpu(env);
1691
1692    timer_del(cpu->gt_timer[timeridx]);
1693}
1694
1695static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1696{
1697    return gt_get_countervalue(env);
1698}
1699
1700static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
1701{
1702    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
1703}
1704
1705static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1706                          int timeridx,
1707                          uint64_t value)
1708{
1709    trace_arm_gt_cval_write(timeridx, value);
1710    env->cp15.c14_timer[timeridx].cval = value;
1711    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1712}
1713
1714static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
1715                             int timeridx)
1716{
1717    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1718
1719    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
1720                      (gt_get_countervalue(env) - offset));
1721}
1722
1723static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1724                          int timeridx,
1725                          uint64_t value)
1726{
1727    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
1728
1729    trace_arm_gt_tval_write(timeridx, value);
1730    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
1731                                         sextract64(value, 0, 32);
1732    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
1733}
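
/* Worked example of the TVAL view above -- a hedged sketch, not part of
 * the original file. TVAL is a signed 32-bit downcounter: writing v sets
 * CVAL = count + sign_extend(v), and reading returns the truncated
 * difference CVAL - count, so a write reads back unchanged until the
 * counter advances.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>
#include <assert.h>

static void tval_round_trip(void)
{
    uint64_t count = 0x123456789abcULL; /* hypothetical counter value */
    int32_t tval = -50;                 /* TVAL may be negative */
    uint64_t cval = count + (int64_t)tval;             /* gt_tval_write */

    assert((int32_t)(uint32_t)(cval - count) == tval); /* gt_tval_read */
}
#endif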
1734
1735static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1736                         int timeridx,
1737                         uint64_t value)
1738{
1739    ARMCPU *cpu = arm_env_get_cpu(env);
1740    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
1741
1742    trace_arm_gt_ctl_write(timeridx, value);
1743    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
1744    if ((oldval ^ value) & 1) {
1745        /* Enable toggled */
1746        gt_recalc_timer(cpu, timeridx);
1747    } else if ((oldval ^ value) & 2) {
1748        /* IMASK toggled: don't need to recalculate,
1749         * just set the interrupt line based on ISTATUS
1750         */
1751        int irqstate = (oldval & 4) && !(value & 2);
1752
1753        trace_arm_gt_imask_toggle(timeridx, irqstate);
1754        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
1755    }
1756}
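
/* Truth table for the interrupt-line calculation above -- a hedged
 * sketch, not part of the original file. In CNT*_CTL, bit 0 is ENABLE,
 * bit 1 is IMASK and bit 2 is the derived ISTATUS; the output line is
 * high only when the timer condition is met and the interrupt is not
 * masked.
 */
#if 0 /* illustrative sketch only */
static int timer_irq_line(unsigned int ctl)
{
    return (ctl & 4) && !(ctl & 2); /* ISTATUS && !IMASK */
}
/* timer_irq_line(5) == 1: enabled, condition met, unmasked.
 * timer_irq_line(7) == 0: masked, so the line stays low even though
 * ISTATUS remains visible in the register.
 */
#endif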
1757
1758static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1759{
1760    gt_timer_reset(env, ri, GTIMER_PHYS);
1761}
1762
1763static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1764                               uint64_t value)
1765{
1766    gt_cval_write(env, ri, GTIMER_PHYS, value);
1767}
1768
1769static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1770{
1771    return gt_tval_read(env, ri, GTIMER_PHYS);
1772}
1773
1774static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1775                               uint64_t value)
1776{
1777    gt_tval_write(env, ri, GTIMER_PHYS, value);
1778}
1779
1780static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1781                              uint64_t value)
1782{
1783    gt_ctl_write(env, ri, GTIMER_PHYS, value);
1784}
1785
1786static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1787{
1788    gt_timer_reset(env, ri, GTIMER_VIRT);
1789}
1790
1791static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1792                               uint64_t value)
1793{
1794    gt_cval_write(env, ri, GTIMER_VIRT, value);
1795}
1796
1797static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1798{
1799    return gt_tval_read(env, ri, GTIMER_VIRT);
1800}
1801
1802static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1803                               uint64_t value)
1804{
1805    gt_tval_write(env, ri, GTIMER_VIRT, value);
1806}
1807
1808static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1809                              uint64_t value)
1810{
1811    gt_ctl_write(env, ri, GTIMER_VIRT, value);
1812}
1813
1814static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
1815                              uint64_t value)
1816{
1817    ARMCPU *cpu = arm_env_get_cpu(env);
1818
1819    trace_arm_gt_cntvoff_write(value);
1820    raw_write(env, ri, value);
1821    gt_recalc_timer(cpu, GTIMER_VIRT);
1822}
1823
1824static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1825{
1826    gt_timer_reset(env, ri, GTIMER_HYP);
1827}
1828
1829static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1830                              uint64_t value)
1831{
1832    gt_cval_write(env, ri, GTIMER_HYP, value);
1833}
1834
1835static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1836{
1837    return gt_tval_read(env, ri, GTIMER_HYP);
1838}
1839
1840static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1841                              uint64_t value)
1842{
1843    gt_tval_write(env, ri, GTIMER_HYP, value);
1844}
1845
1846static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1847                              uint64_t value)
1848{
1849    gt_ctl_write(env, ri, GTIMER_HYP, value);
1850}
1851
1852static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1853{
1854    gt_timer_reset(env, ri, GTIMER_SEC);
1855}
1856
1857static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1858                              uint64_t value)
1859{
1860    gt_cval_write(env, ri, GTIMER_SEC, value);
1861}
1862
1863static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
1864{
1865    return gt_tval_read(env, ri, GTIMER_SEC);
1866}
1867
1868static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
1869                              uint64_t value)
1870{
1871    gt_tval_write(env, ri, GTIMER_SEC, value);
1872}
1873
1874static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
1875                              uint64_t value)
1876{
1877    gt_ctl_write(env, ri, GTIMER_SEC, value);
1878}
1879
1880void arm_gt_ptimer_cb(void *opaque)
1881{
1882    ARMCPU *cpu = opaque;
1883
1884    gt_recalc_timer(cpu, GTIMER_PHYS);
1885}
1886
1887void arm_gt_vtimer_cb(void *opaque)
1888{
1889    ARMCPU *cpu = opaque;
1890
1891    gt_recalc_timer(cpu, GTIMER_VIRT);
1892}
1893
1894void arm_gt_htimer_cb(void *opaque)
1895{
1896    ARMCPU *cpu = opaque;
1897
1898    gt_recalc_timer(cpu, GTIMER_HYP);
1899}
1900
1901void arm_gt_stimer_cb(void *opaque)
1902{
1903    ARMCPU *cpu = opaque;
1904
1905    gt_recalc_timer(cpu, GTIMER_SEC);
1906}
1907
1908static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
1909    /* Note that CNTFRQ is purely reads-as-written for the benefit
1910     * of software; writing it doesn't actually change the timer frequency.
1911     * Our reset value matches the fixed frequency we implement the timer at.
1912     */
1913    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
1914      .type = ARM_CP_ALIAS,
1915      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1916      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
1917    },
1918    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
1919      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
1920      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
1921      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
1922      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
1923    },
1924    /* overall control: mostly access permissions */
1925    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
1926      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
1927      .access = PL1_RW,
1928      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
1929      .resetvalue = 0,
1930    },
1931    /* per-timer control */
1932    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1933      .secure = ARM_CP_SECSTATE_NS,
1934      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1935      .accessfn = gt_ptimer_access,
1936      .fieldoffset = offsetoflow32(CPUARMState,
1937                                   cp15.c14_timer[GTIMER_PHYS].ctl),
1938      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1939    },
1940    { .name = "CNTP_CTL(S)",
1941      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
1942      .secure = ARM_CP_SECSTATE_S,
1943      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1944      .accessfn = gt_ptimer_access,
1945      .fieldoffset = offsetoflow32(CPUARMState,
1946                                   cp15.c14_timer[GTIMER_SEC].ctl),
1947      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
1948    },
1949    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
1950      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
1951      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1952      .accessfn = gt_ptimer_access,
1953      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
1954      .resetvalue = 0,
1955      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
1956    },
1957    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
1958      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
1959      .accessfn = gt_vtimer_access,
1960      .fieldoffset = offsetoflow32(CPUARMState,
1961                                   cp15.c14_timer[GTIMER_VIRT].ctl),
1962      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1963    },
1964    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
1965      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
1966      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
1967      .accessfn = gt_vtimer_access,
1968      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
1969      .resetvalue = 0,
1970      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
1971    },
1972    /* TimerValue views: a 32 bit downcounting view of the underlying state */
1973    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1974      .secure = ARM_CP_SECSTATE_NS,
1975      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1976      .accessfn = gt_ptimer_access,
1977      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1978    },
1979    { .name = "CNTP_TVAL(S)",
1980      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
1981      .secure = ARM_CP_SECSTATE_S,
1982      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1983      .accessfn = gt_ptimer_access,
1984      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
1985    },
1986    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1987      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
1988      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1989      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
1990      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
1991    },
1992    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
1993      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
1994      .accessfn = gt_vtimer_access,
1995      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
1996    },
1997    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
1998      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
1999      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
2000      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
2001      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2002    },
2003    /* The counter itself */
2004    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
2005      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2006      .accessfn = gt_pct_access,
2007      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2008    },
2009    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
2010      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
2011      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2012      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
2013    },
2014    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
2015      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2016      .accessfn = gt_vct_access,
2017      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
2018    },
2019    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2020      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2021      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2022      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
2023    },
2024    /* Comparison value, indicating when the timer goes off */
2025    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
2026      .secure = ARM_CP_SECSTATE_NS,
2027      .access = PL1_RW | PL0_R,
2028      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2029      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2030      .accessfn = gt_ptimer_access,
2031      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2032    },
2033    { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
2034      .secure = ARM_CP_SECSTATE_S,
2035      .access = PL1_RW | PL0_R,
2036      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2037      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2038      .accessfn = gt_ptimer_access,
2039      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2040    },
2041    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2042      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
2043      .access = PL1_RW | PL0_R,
2044      .type = ARM_CP_IO,
2045      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2046      .resetvalue = 0, .accessfn = gt_ptimer_access,
2047      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2048    },
2049    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
2050      .access = PL1_RW | PL0_R,
2051      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2052      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2053      .accessfn = gt_vtimer_access,
2054      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2055    },
2056    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2057      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
2058      .access = PL1_RW | PL0_R,
2059      .type = ARM_CP_IO,
2060      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2061      .resetvalue = 0, .accessfn = gt_vtimer_access,
2062      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2063    },
2064    /* Secure timer -- this is actually restricted to EL3 only,
2065     * and configurably to Secure EL1, via the accessfn.
2066     */
2067    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
2068      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
2069      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
2070      .accessfn = gt_stimer_access,
2071      .readfn = gt_sec_tval_read,
2072      .writefn = gt_sec_tval_write,
2073      .resetfn = gt_sec_timer_reset,
2074    },
2075    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
2076      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
2077      .type = ARM_CP_IO, .access = PL1_RW,
2078      .accessfn = gt_stimer_access,
2079      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2080      .resetvalue = 0,
2081      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2082    },
2083    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2084      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2085      .type = ARM_CP_IO, .access = PL1_RW,
2086      .accessfn = gt_stimer_access,
2087      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2088      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2089    },
2090    REGINFO_SENTINEL
2091};
2092
2093#else
2094/* In user-mode none of the generic timer registers are accessible,
2095 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
2096 * so instead just don't register any of them.
2097 */
2098static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2099    REGINFO_SENTINEL
2100};
2101
2102#endif
2103
2104static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2105{
2106    if (arm_feature(env, ARM_FEATURE_LPAE)) {
2107        raw_write(env, ri, value);
2108    } else if (arm_feature(env, ARM_FEATURE_V7)) {
2109        raw_write(env, ri, value & 0xfffff6ff);
2110    } else {
2111        raw_write(env, ri, value & 0xfffff1ff);
2112    }
2113}
2114
2115#ifndef CONFIG_USER_ONLY
2116/* get_phys_addr() isn't present for user-mode-only targets */
2117
2118static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2119                                 bool isread)
2120{
2121    if (ri->opc2 & 4) {
2122        /* The ATS12NSO* operations must trap to EL3 if executed in
2123         * Secure EL1 (which can only happen if EL3 is AArch64).
2124         * They are simply UNDEF if executed from NS EL1.
2125         * They function normally from EL2 or EL3.
2126         */
2127        if (arm_current_el(env) == 1) {
2128            if (arm_is_secure_below_el3(env)) {
2129                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2130            }
2131            return CP_ACCESS_TRAP_UNCATEGORIZED;
2132        }
2133    }
2134    return CP_ACCESS_OK;
2135}
2136
2137static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
2138                             int access_type, ARMMMUIdx mmu_idx)
2139{
2140    hwaddr phys_addr;
2141    target_ulong page_size;
2142    int prot;
2143    uint32_t fsr;
2144    bool ret;
2145    uint64_t par64;
2146    MemTxAttrs attrs = {};
2147    ARMMMUFaultInfo fi = {};
2148
2149    ret = get_phys_addr(env, value, access_type, mmu_idx,
2150                        &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
2151    if (extended_addresses_enabled(env)) {
2152        /* fsr is a DFSR/IFSR value for the long descriptor
2153         * translation table format, but with WnR always clear.
2154         * Convert it to a 64-bit PAR.
2155         */
2156        par64 = (1 << 11); /* LPAE bit always set */
2157        if (!ret) {
2158            par64 |= phys_addr & ~0xfffULL;
2159            if (!attrs.secure) {
2160                par64 |= (1 << 9); /* NS */
2161            }
2162            /* We don't set the ATTR or SH fields in the PAR. */
2163        } else {
2164            par64 |= 1; /* F */
2165            par64 |= (fsr & 0x3f) << 1; /* FS */
2166            /* Note that S2WLK and FSTAGE are always zero, because we don't
2167             * implement virtualization and therefore there can't be a stage 2
2168             * fault.
2169             */
2170        }
2171    } else {
2172        /* fsr is a DFSR/IFSR value for the short descriptor
2173         * translation table format (with WnR always clear).
2174         * Convert it to a 32-bit PAR.
2175         */
2176        if (!ret) {
2177            /* We do not set any attribute bits in the PAR */
2178            if (page_size == (1 << 24)
2179                && arm_feature(env, ARM_FEATURE_V7)) {
2180                par64 = (phys_addr & 0xff000000) | (1 << 1);
2181            } else {
2182                par64 = phys_addr & 0xfffff000;
2183            }
2184            if (!attrs.secure) {
2185                par64 |= (1 << 9); /* NS */
2186            }
2187        } else {
2188            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
2189                    ((fsr & 0xf) << 1) | 1;
2190        }
2191    }
2192    return par64;
2193}
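
/* Decoding sketch for the long-format PAR built above -- hedged, not
 * part of the original file. Bit 0 is F (fault); on a fault, bits [6:1]
 * hold FS; on success, bits [47:12] hold the PA, bit 9 is NS and bit 11
 * is the always-set LPAE bit.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>
#include <stdbool.h>

static bool par64_is_fault(uint64_t par64)
{
    return par64 & 1;
}

static unsigned int par64_fault_status(uint64_t par64)
{
    return (par64 >> 1) & 0x3f; /* FS, valid only when F is set */
}

static uint64_t par64_phys_addr(uint64_t par64)
{
    return par64 & 0x0000fffffffff000ULL; /* PA, valid only on success */
}
#endif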
2194
2195static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2196{
2197    int access_type = ri->opc2 & 1;
2198    uint64_t par64;
2199    ARMMMUIdx mmu_idx;
2200    int el = arm_current_el(env);
2201    bool secure = arm_is_secure_below_el3(env);
2202
2203    switch (ri->opc2 & 6) {
2204    case 0:
2205        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
2206        switch (el) {
2207        case 3:
2208            mmu_idx = ARMMMUIdx_S1E3;
2209            break;
2210        case 2:
2211            mmu_idx = ARMMMUIdx_S1NSE1;
2212            break;
2213        case 1:
2214            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2215            break;
2216        default:
2217            g_assert_not_reached();
2218        }
2219        break;
2220    case 2:
2221        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
2222        switch (el) {
2223        case 3:
2224            mmu_idx = ARMMMUIdx_S1SE0;
2225            break;
2226        case 2:
2227            mmu_idx = ARMMMUIdx_S1NSE0;
2228            break;
2229        case 1:
2230            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2231            break;
2232        default:
2233            g_assert_not_reached();
2234        }
2235        break;
2236    case 4:
2237        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
2238        mmu_idx = ARMMMUIdx_S12NSE1;
2239        break;
2240    case 6:
2241        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
2242        mmu_idx = ARMMMUIdx_S12NSE0;
2243        break;
2244    default:
2245        g_assert_not_reached();
2246    }
2247
2248    par64 = do_ats_write(env, value, access_type, mmu_idx);
2249
2250    A32_BANKED_CURRENT_REG_SET(env, par, par64);
2251}
2252
2253static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
2254                        uint64_t value)
2255{
2256    int access_type = ri->opc2 & 1;
2257    uint64_t par64;
2258
2259    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
2260
2261    A32_BANKED_CURRENT_REG_SET(env, par, par64);
2262}
2263
2264static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
2265                                     bool isread)
2266{
2267    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
2268        return CP_ACCESS_TRAP;
2269    }
2270    return CP_ACCESS_OK;
2271}
2272
2273static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
2274                        uint64_t value)
2275{
2276    int access_type = ri->opc2 & 1;
2277    ARMMMUIdx mmu_idx;
2278    int secure = arm_is_secure_below_el3(env);
2279
2280    switch (ri->opc2 & 6) {
2281    case 0:
2282        switch (ri->opc1) {
2283        case 0: /* AT S1E1R, AT S1E1W */
2284            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
2285            break;
2286        case 4: /* AT S1E2R, AT S1E2W */
2287            mmu_idx = ARMMMUIdx_S1E2;
2288            break;
2289        case 6: /* AT S1E3R, AT S1E3W */
2290            mmu_idx = ARMMMUIdx_S1E3;
2291            break;
2292        default:
2293            g_assert_not_reached();
2294        }
2295        break;
2296    case 2: /* AT S1E0R, AT S1E0W */
2297        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
2298        break;
2299    case 4: /* AT S12E1R, AT S12E1W */
2300        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
2301        break;
2302    case 6: /* AT S12E0R, AT S12E0W */
2303        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
2304        break;
2305    default:
2306        g_assert_not_reached();
2307    }
2308
2309    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
2310}
2311#endif
2312
2313static const ARMCPRegInfo vapa_cp_reginfo[] = {
2314    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
2315      .access = PL1_RW, .resetvalue = 0,
2316      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
2317                             offsetoflow32(CPUARMState, cp15.par_ns) },
2318      .writefn = par_write },
2319#ifndef CONFIG_USER_ONLY
2320    /* This underdecoding is safe because the reginfo is NO_RAW. */
2321    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
2322      .access = PL1_W, .accessfn = ats_access,
2323      .writefn = ats_write, .type = ARM_CP_NO_RAW },
2324#endif
2325    REGINFO_SENTINEL
2326};
2327
2328/* Return basic MPU access permission bits.  */
2329static uint32_t simple_mpu_ap_bits(uint32_t val)
2330{
2331    uint32_t ret;
2332    uint32_t mask;
2333    int i;
2334    ret = 0;
2335    mask = 3;
2336    for (i = 0; i < 16; i += 2) {
2337        ret |= (val >> i) & mask;
2338        mask <<= 2;
2339    }
2340    return ret;
2341}
2342
2343/* Pad basic MPU access permission bits to extended format.  */
2344static uint32_t extended_mpu_ap_bits(uint32_t val)
2345{
2346    uint32_t ret;
2347    uint32_t mask;
2348    int i;
2349    ret = 0;
2350    mask = 3;
2351    for (i = 0; i < 16; i += 2) {
2352        ret |= (val & mask) << i;
2353        mask <<= 2;
2354    }
2355    return ret;
2356}
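
/* Round-trip example for the two AP encodings above -- a hedged sketch,
 * not part of the original file. The "simple" format packs eight 2-bit
 * fields into bits [15:0]; the "extended" format spreads each field into
 * its own 4-bit slot, so converting out and back is the identity.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>
#include <assert.h>

static void mpu_ap_round_trip(void)
{
    uint32_t simple = 0x1234; /* eight 2-bit AP fields */

    assert(simple_mpu_ap_bits(extended_mpu_ap_bits(simple)) == simple);
}
#endif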
2357
2358static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2359                                 uint64_t value)
2360{
2361    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
2362}
2363
2364static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2365{
2366    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
2367}
2368
2369static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
2370                                 uint64_t value)
2371{
2372    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
2373}
2374
2375static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
2376{
2377    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
2378}
2379
2380static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
2381{
2382    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2383
2384    if (!u32p) {
2385        return 0;
2386    }
2387
2388    u32p += env->pmsav7.rnr;
2389    return *u32p;
2390}
2391
2392static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
2393                         uint64_t value)
2394{
2395    ARMCPU *cpu = arm_env_get_cpu(env);
2396    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
2397
2398    if (!u32p) {
2399        return;
2400    }
2401
2402    u32p += env->pmsav7.rnr;
2403    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
2404    *u32p = value;
2405}
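
/* Indexing sketch for the PMSAv7 accessors above -- hedged, not part of
 * the original file. The reginfo's fieldoffset points at a *pointer* to
 * a per-region array (drbar/drsr/dracr), and pmsav7.rnr selects which
 * region the single architectural register name currently aliases.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>

struct demo_pmsa {
    uint32_t *drbar; /* pmsav7_dregion entries, or NULL if none */
    uint32_t rnr;    /* region number register */
};

static uint32_t demo_drbar_read(struct demo_pmsa *s)
{
    return s->drbar ? s->drbar[s->rnr] : 0; /* RAZ when no regions */
}
#endif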
2406
2407static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2408                              uint64_t value)
2409{
2410    ARMCPU *cpu = arm_env_get_cpu(env);
2411    uint32_t nrgs = cpu->pmsav7_dregion;
2412
2413    if (value >= nrgs) {
2414        qemu_log_mask(LOG_GUEST_ERROR,
2415                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
2416                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
2417        return;
2418    }
2419
2420    raw_write(env, ri, value);
2421}
2422
2423static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
2424    /* Reset for all these registers is handled in arm_cpu_reset(),
2425     * because the PMSAv7 is also used by M-profile CPUs, which do
2426     * not register cpregs but still need the state to be reset.
2427     */
2428    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
2429      .access = PL1_RW, .type = ARM_CP_NO_RAW,
2430      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
2431      .readfn = pmsav7_read, .writefn = pmsav7_write,
2432      .resetfn = arm_cp_reset_ignore },
2433    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
2434      .access = PL1_RW, .type = ARM_CP_NO_RAW,
2435      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
2436      .readfn = pmsav7_read, .writefn = pmsav7_write,
2437      .resetfn = arm_cp_reset_ignore },
2438    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
2439      .access = PL1_RW, .type = ARM_CP_NO_RAW,
2440      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
2441      .readfn = pmsav7_read, .writefn = pmsav7_write,
2442      .resetfn = arm_cp_reset_ignore },
2443    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
2444      .access = PL1_RW,
2445      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr),
2446      .writefn = pmsav7_rgnr_write,
2447      .resetfn = arm_cp_reset_ignore },
2448    REGINFO_SENTINEL
2449};
2450
2451static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
2452    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2453      .access = PL1_RW, .type = ARM_CP_ALIAS,
2454      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2455      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
2456    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2457      .access = PL1_RW, .type = ARM_CP_ALIAS,
2458      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2459      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
2460    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
2461      .access = PL1_RW,
2462      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
2463      .resetvalue = 0, },
2464    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
2465      .access = PL1_RW,
2466      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
2467      .resetvalue = 0, },
2468    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
2469      .access = PL1_RW,
2470      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
2471    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
2472      .access = PL1_RW,
2473      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
2474    /* Protection region base and size registers */
2475    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
2476      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2477      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
2478    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
2479      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2480      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
2481    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
2482      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2483      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
2484    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
2485      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2486      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
2487    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
2488      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2489      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
2490    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
2491      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2492      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
2493    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
2494      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2495      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
2496    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
2497      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
2498      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
2499    REGINFO_SENTINEL
2500};
2501
2502static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
2503                                 uint64_t value)
2504{
2505    TCR *tcr = raw_ptr(env, ri);
2506    int maskshift = extract32(value, 0, 3);
2507
2508    if (!arm_feature(env, ARM_FEATURE_V8)) {
2509        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
2510            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
2511             * using Long-descriptor translation table format */
2512            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
2513        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
2514            /* In an implementation that includes the Security Extensions
2515             * TTBCR has additional fields PD0 [4] and PD1 [5] for
2516             * Short-descriptor translation table format.
2517             */
2518            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
2519        } else {
2520            value &= TTBCR_N;
2521        }
2522    }
2523
2524    /* Update the masks corresponding to the TCR bank being written.
2525     * Note that we always calculate mask and base_mask, but
2526     * they are only used for short-descriptor tables (i.e. if EAE is 0);
2527     * for long-descriptor tables the TCR fields are used differently
2528     * and the mask and base_mask values are meaningless.
2529     */
2530    tcr->raw_tcr = value;
2531    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
2532    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
2533}
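
/* Worked example for the short-descriptor masks above -- a hedged
 * sketch, not part of the original file. With TTBCR.N == 2, VAs whose
 * top two bits are non-zero are translated via TTBR1, and the TTBR0
 * table base alignment drops from 16KB to 16KB >> 2 = 4KB.
 */
#if 0 /* illustrative sketch only */
#include <stdint.h>
#include <assert.h>

static void ttbcr_n2_masks(void)
{
    int maskshift = 2; /* TTBCR.N */
    uint32_t mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    uint32_t base_mask = ~((uint32_t)0x3fffu >> maskshift);

    assert(mask == 0xc0000000u);      /* VA bits selecting TTBR1 */
    assert(base_mask == 0xfffff000u); /* TTBR0 base now 4KB aligned */
}
#endif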
2534
2535static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2536                             uint64_t value)
2537{
2538    ARMCPU *cpu = arm_env_get_cpu(env);
2539
2540    if (arm_feature(env, ARM_FEATURE_LPAE)) {
2541        /* With LPAE the TTBCR could result in a change of ASID
2542         * via the TTBCR.A1 bit, so do a TLB flush.
2543         */
2544        tlb_flush(CPU(cpu));
2545    }
2546    vmsa_ttbcr_raw_write(env, ri, value);
2547}
2548
2549static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2550{
2551    TCR *tcr = raw_ptr(env, ri);
2552
2553    /* Reset the TCR and the masks corresponding to the bank of
2554     * the TCR being reset.
2555     */
2556    tcr->raw_tcr = 0;
2557    tcr->mask = 0;
2558    tcr->base_mask = 0xffffc000u;
2559}
2560
2561static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2562                               uint64_t value)
2563{
2564    ARMCPU *cpu = arm_env_get_cpu(env);
2565    TCR *tcr = raw_ptr(env, ri);
2566
2567    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
2568    tlb_flush(CPU(cpu));
2569    tcr->raw_tcr = value;
2570}
2571
2572static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2573                            uint64_t value)
2574{
2575    /* 64 bit accesses to the TTBRs can change the ASID and so we
2576     * must flush the TLB.
2577     */
2578    if (cpreg_field_is_64bit(ri)) {
2579        ARMCPU *cpu = arm_env_get_cpu(env);
2580
2581        tlb_flush(CPU(cpu));
2582    }
2583    raw_write(env, ri, value);
2584}
2585
2586static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2587                        uint64_t value)
2588{
2589    ARMCPU *cpu = arm_env_get_cpu(env);
2590    CPUState *cs = CPU(cpu);
2591
2592    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
2593    if (raw_read(env, ri) != value) {
2594        tlb_flush_by_mmuidx(cs,
2595                            ARMMMUIdxBit_S12NSE1 |
2596                            ARMMMUIdxBit_S12NSE0 |
2597                            ARMMMUIdxBit_S2NS);
2598        raw_write(env, ri, value);
2599    }
2600}
2601
2602static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
2603    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
2604      .access = PL1_RW, .type = ARM_CP_ALIAS,
2605      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
2606                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
2607    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
2608      .access = PL1_RW, .resetvalue = 0,
2609      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
2610                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
2611    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
2612      .access = PL1_RW, .resetvalue = 0,
2613      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
2614                             offsetof(CPUARMState, cp15.dfar_ns) } },
2615    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
2616      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
2617      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
2618      .resetvalue = 0, },
2619    REGINFO_SENTINEL
2620};
2621
2622static const ARMCPRegInfo vmsa_cp_reginfo[] = {
2623    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
2624      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
2625      .access = PL1_RW,
2626      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
2627    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
2628      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
2629      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2630      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2631                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
2632    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
2633      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
2634      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
2635      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2636                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
2637    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
2638      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2639      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
2640      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
2641      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
2642    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
2643      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
2644      .raw_writefn = vmsa_ttbcr_raw_write,
2645      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
2646                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
2647    REGINFO_SENTINEL
2648};
2649
2650static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
2651                                uint64_t value)
2652{
2653    env->cp15.c15_ticonfig = value & 0xe7;
2654    /* The OS_TYPE bit in this register changes the reported CPUID! */
2655    env->cp15.c0_cpuid = (value & (1 << 5)) ?
2656        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
2657}
2658
2659static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
2660                                uint64_t value)
2661{
2662    env->cp15.c15_threadid = value & 0xffff;
2663}
2664
2665static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
2666                           uint64_t value)
2667{
2668    /* Wait-for-interrupt (deprecated) */
2669    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
2670}
2671
2672static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
2673                                  uint64_t value)
2674{
2675    /* On OMAP there are registers indicating the max/min index of dcache lines
2676     * containing a dirty line; cache flush operations have to reset these.
2677     */
2678    env->cp15.c15_i_max = 0x000;
2679    env->cp15.c15_i_min = 0xff0;
2680}
2681
2682static const ARMCPRegInfo omap_cp_reginfo[] = {
2683    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
2684      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
2685      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
2686      .resetvalue = 0, },
2687    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
2688      .access = PL1_RW, .type = ARM_CP_NOP },
2689    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
2690      .access = PL1_RW,
2691      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
2692      .writefn = omap_ticonfig_write },
2693    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
2694      .access = PL1_RW,
2695      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
2696    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
2697      .access = PL1_RW, .resetvalue = 0xff0,
2698      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
2699    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
2700      .access = PL1_RW,
2701      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
2702      .writefn = omap_threadid_write },
2703    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
2704      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2705      .type = ARM_CP_NO_RAW,
2706      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
2707    /* TODO: Peripheral port remap register:
2708     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
2709     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
2710     * when MMU is off.
2711     */
2712    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
2713      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
2714      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
2715      .writefn = omap_cachemaint_write },
2716    { .name = "C9", .cp = 15, .crn = 9,
2717      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
2718      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
2719    REGINFO_SENTINEL
2720};
2721
2722static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
2723                              uint64_t value)
2724{
2725    env->cp15.c15_cpar = value & 0x3fff;
2726}
2727
2728static const ARMCPRegInfo xscale_cp_reginfo[] = {
2729    { .name = "XSCALE_CPAR",
2730      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
2731      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
2732      .writefn = xscale_cpar_write, },
2733    { .name = "XSCALE_AUXCR",
2734      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
2735      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
2736      .resetvalue = 0, },
2737    /* XScale specific cache-lockdown: since we have no cache we NOP these
2738     * and hope the guest does not really rely on cache behaviour.
2739     */
2740    { .name = "XSCALE_LOCK_ICACHE_LINE",
2741      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
2742      .access = PL1_W, .type = ARM_CP_NOP },
2743    { .name = "XSCALE_UNLOCK_ICACHE",
2744      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
2745      .access = PL1_W, .type = ARM_CP_NOP },
2746    { .name = "XSCALE_DCACHE_LOCK",
2747      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
2748      .access = PL1_RW, .type = ARM_CP_NOP },
2749    { .name = "XSCALE_UNLOCK_DCACHE",
2750      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
2751      .access = PL1_W, .type = ARM_CP_NOP },
2752    REGINFO_SENTINEL
2753};
2754
2755static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
2756    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
2757     * implementation of this implementation-defined space.
2758     * Ideally this should eventually disappear in favour of actually
2759     * implementing the correct behaviour for all cores.
2760     */
2761    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
2762      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2763      .access = PL1_RW,
2764      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
2765      .resetvalue = 0 },
2766    REGINFO_SENTINEL
2767};
2768
2769static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
2770    /* Cache status: RAZ because we have no cache so it's always clean */
2771    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
2772      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2773      .resetvalue = 0 },
2774    REGINFO_SENTINEL
2775};
2776
2777static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
2778    /* We never have a block transfer operation in progress */
2779    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
2780      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2781      .resetvalue = 0 },
2782    /* The cache ops themselves: these all NOP for QEMU */
2783    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
2784      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2785    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
2786      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2787    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
2788      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2789    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
2790      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2791    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
2792      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2793    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
2794      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
2795    REGINFO_SENTINEL
2796};
2797
2798static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
2799    /* The cache test-and-clean instructions always return (1 << 30)
2800     * to indicate that there are no dirty cache lines.
2801     */
2802    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
2803      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2804      .resetvalue = (1 << 30) },
2805    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
2806      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
2807      .resetvalue = (1 << 30) },
2808    REGINFO_SENTINEL
2809};
2810
2811static const ARMCPRegInfo strongarm_cp_reginfo[] = {
2812    /* Ignore ReadBuffer accesses */
2813    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
2814      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
2815      .access = PL1_RW, .resetvalue = 0,
2816      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
2817    REGINFO_SENTINEL
2818};
2819
2820static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2821{
2822    ARMCPU *cpu = arm_env_get_cpu(env);
2823    unsigned int cur_el = arm_current_el(env);
2824    bool secure = arm_is_secure(env);
2825
2826    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2827        return env->cp15.vpidr_el2;
2828    }
2829    return raw_read(env, ri);
2830}
2831
2832static uint64_t mpidr_read_val(CPUARMState *env)
2833{
2834    ARMCPU *cpu = arm_env_get_cpu(env);
2835    uint64_t mpidr = cpu->mp_affinity;
2836
2837    if (arm_feature(env, ARM_FEATURE_V7MP)) {
2838        mpidr |= (1U << 31);
2839        /* Cores which are uniprocessor (non-coherent)
2840         * but still implement the MP extensions set
2841         * bit 30. (For instance, Cortex-R5).
2842         */
2843        if (cpu->mp_is_up) {
2844            mpidr |= (1u << 30);
2845        }
2846    }
2847    return mpidr;
2848}
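
/* Worked example: core 2 of a four-core v7MP cluster, with mp_affinity
 * 0x2, reads MPIDR == 0x80000002 (bit 31 set because the MP extensions
 * are implemented); a uniprocessor Cortex-R5 with mp_is_up set reads
 * 0xC0000000.
 */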
2849
2850static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2851{
2852    unsigned int cur_el = arm_current_el(env);
2853    bool secure = arm_is_secure(env);
2854
2855    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
2856        return env->cp15.vmpidr_el2;
2857    }
2858    return mpidr_read_val(env);
2859}
2860
2861static const ARMCPRegInfo mpidr_cp_reginfo[] = {
2862    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
2863      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
2864      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
2865    REGINFO_SENTINEL
2866};
2867
2868static const ARMCPRegInfo lpae_cp_reginfo[] = {
2869    /* NOP AMAIR0/1 */
2870    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
2871      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
2872      .access = PL1_RW, .type = ARM_CP_CONST,
2873      .resetvalue = 0 },
2874    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
2875    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
2876      .access = PL1_RW, .type = ARM_CP_CONST,
2877      .resetvalue = 0 },
2878    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
2879      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
2880      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
2881                             offsetof(CPUARMState, cp15.par_ns)} },
2882    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
2883      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2884      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
2885                             offsetof(CPUARMState, cp15.ttbr0_ns) },
2886      .writefn = vmsa_ttbr_write, },
2887    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
2888      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
2889      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
2890                             offsetof(CPUARMState, cp15.ttbr1_ns) },
2891      .writefn = vmsa_ttbr_write, },
2892    REGINFO_SENTINEL
2893};
2894
2895static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2896{
2897    return vfp_get_fpcr(env);
2898}
2899
2900static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2901                            uint64_t value)
2902{
2903    vfp_set_fpcr(env, value);
2904}
2905
2906static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2907{
2908    return vfp_get_fpsr(env);
2909}
2910
2911static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2912                            uint64_t value)
2913{
2914    vfp_set_fpsr(env, value);
2915}
2916
2917static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
2918                                       bool isread)
2919{
2920    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
2921        return CP_ACCESS_TRAP;
2922    }
2923    return CP_ACCESS_OK;
2924}
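
/* For example, an EL0 "mrs x0, daif" or "msr daif, x0" traps to EL1
 * unless the OS has set SCTLR_EL1.UMA; when a write is permitted,
 * aa64_daif_write() below clips it to the four PSTATE.DAIF bits.
 */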
2925
2926static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
2927                            uint64_t value)
2928{
2929    env->daif = value & PSTATE_DAIF;
2930}
2931
2932static CPAccessResult aa64_cacheop_access(CPUARMState *env,
2933                                          const ARMCPRegInfo *ri,
2934                                          bool isread)
2935{
2936    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
2937     * SCTLR_EL1.UCI is set.
2938     */
2939    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
2940        return CP_ACCESS_TRAP;
2941    }
2942    return CP_ACCESS_OK;
2943}
2944
2945/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
2946 * Page D4-1736 (DDI0487A.b)
2947 */
2948
2949static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2950                                    uint64_t value)
2951{
2952    CPUState *cs = ENV_GET_CPU(env);
2953
2954    if (arm_is_secure_below_el3(env)) {
2955        tlb_flush_by_mmuidx(cs,
2956                            ARMMMUIdxBit_S1SE1 |
2957                            ARMMMUIdxBit_S1SE0);
2958    } else {
2959        tlb_flush_by_mmuidx(cs,
2960                            ARMMMUIdxBit_S12NSE1 |
2961                            ARMMMUIdxBit_S12NSE0);
2962    }
2963}
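
/* The secure/non-secure selection above recurs in most of the EL1 TLBI
 * writefns that follow; a hypothetical refactoring (not what this file
 * currently does) could hoist it into a helper:
 *
 *     static int vae1_tlbmask(CPUARMState *env)
 *     {
 *         return arm_is_secure_below_el3(env)
 *             ? ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0
 *             : ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
 *     }
 *
 * leaving each writefn as a single tlb_flush_by_mmuidx() call.
 */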
2964
2965static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
2966                                      uint64_t value)
2967{
2968    CPUState *cs = ENV_GET_CPU(env);
2969    bool sec = arm_is_secure_below_el3(env);
2970
2971    if (sec) {
2972        tlb_flush_by_mmuidx_all_cpus_synced(cs,
2973                                            ARMMMUIdxBit_S1SE1 |
2974                                            ARMMMUIdxBit_S1SE0);
2975    } else {
2976        tlb_flush_by_mmuidx_all_cpus_synced(cs,
2977                                            ARMMMUIdxBit_S12NSE1 |
2978                                            ARMMMUIdxBit_S12NSE0);
2979    }
2980}
2981
2982static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
2983                                  uint64_t value)
2984{
2985    /* Note that the 'ALL' scope must invalidate both stage 1 and
2986     * stage 2 translations, whereas most other scopes only invalidate
2987     * stage 1 translations.
2988     */
2989    ARMCPU *cpu = arm_env_get_cpu(env);
2990    CPUState *cs = CPU(cpu);
2991
2992    if (arm_is_secure_below_el3(env)) {
2993        tlb_flush_by_mmuidx(cs,
2994                            ARMMMUIdxBit_S1SE1 |
2995                            ARMMMUIdxBit_S1SE0);
2996    } else {
2997        if (arm_feature(env, ARM_FEATURE_EL2)) {
2998            tlb_flush_by_mmuidx(cs,
2999                                ARMMMUIdxBit_S12NSE1 |
3000                                ARMMMUIdxBit_S12NSE0 |
3001                                ARMMMUIdxBit_S2NS);
3002        } else {
3003            tlb_flush_by_mmuidx(cs,
3004                                ARMMMUIdxBit_S12NSE1 |
3005                                ARMMMUIdxBit_S12NSE0);
3006        }
3007    }
3008}
3009
3010static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3011                                  uint64_t value)
3012{
3013    ARMCPU *cpu = arm_env_get_cpu(env);
3014    CPUState *cs = CPU(cpu);
3015
3016    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
3017}
3018
3019static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3020                                  uint64_t value)
3021{
3022    ARMCPU *cpu = arm_env_get_cpu(env);
3023    CPUState *cs = CPU(cpu);
3024
3025    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
3026}
3027
3028static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3029                                    uint64_t value)
3030{
3031    /* Note that the 'ALL' scope must invalidate both stage 1 and
3032     * stage 2 translations, whereas most other scopes only invalidate
3033     * stage 1 translations.
3034     */
3035    CPUState *cs = ENV_GET_CPU(env);
3036    bool sec = arm_is_secure_below_el3(env);
3037    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
3038
3039    if (sec) {
3040        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3041                                            ARMMMUIdxBit_S1SE1 |
3042                                            ARMMMUIdxBit_S1SE0);
3043    } else if (has_el2) {
3044        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3045                                            ARMMMUIdxBit_S12NSE1 |
3046                                            ARMMMUIdxBit_S12NSE0 |
3047                                            ARMMMUIdxBit_S2NS);
3048    } else {
3049        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3050                                            ARMMMUIdxBit_S12NSE1 |
3051                                            ARMMMUIdxBit_S12NSE0);
3052    }
3053}
3054
3055static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3056                                    uint64_t value)
3057{
3058    CPUState *cs = ENV_GET_CPU(env);
3059
3060    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
3061}
3062
3063static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3064                                    uint64_t value)
3065{
3066    CPUState *cs = ENV_GET_CPU(env);
3067
3068    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
3069}
3070
3071static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3072                                 uint64_t value)
3073{
3074    /* Invalidate by VA, EL1&0 (AArch64 version).
3075     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
3076     * since we don't support flush-for-specific-ASID-only or
3077     * flush-last-level-only.
3078     */
3079    ARMCPU *cpu = arm_env_get_cpu(env);
3080    CPUState *cs = CPU(cpu);
3081    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3082
3083    if (arm_is_secure_below_el3(env)) {
3084        tlb_flush_page_by_mmuidx(cs, pageaddr,
3085                                 ARMMMUIdxBit_S1SE1 |
3086                                 ARMMMUIdxBit_S1SE0);
3087    } else {
3088        tlb_flush_page_by_mmuidx(cs, pageaddr,
3089                                 ARMMMUIdxBit_S12NSE1 |
3090                                 ARMMMUIdxBit_S12NSE0);
3091    }
3092}
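
/* Worked example of the address recovery: the TLBI payload carries
 * VA[55:12] in bits [43:0]; "value << 12" moves the field into place and
 * sextract64(..., 0, 56) replicates bit 55 upwards, so:
 *
 *     value = 0x00000fffffffffff  ->  pageaddr = 0xfffffffffffff000
 *     value = 0x0000000000001234  ->  pageaddr = 0x0000000001234000
 *
 * which recovers the canonical kernel/user VA forms the TLB stores.
 */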
3093
3094static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3095                                 uint64_t value)
3096{
3097    /* Invalidate by VA, EL2
3098     * Currently handles both VAE2 and VALE2, since we don't support
3099     * flush-last-level-only.
3100     */
3101    ARMCPU *cpu = arm_env_get_cpu(env);
3102    CPUState *cs = CPU(cpu);
3103    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3104
3105    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
3106}
3107
3108static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3109                                 uint64_t value)
3110{
3111    /* Invalidate by VA, EL3
3112     * Currently handles both VAE3 and VALE3, since we don't support
3113     * flush-last-level-only.
3114     */
3115    ARMCPU *cpu = arm_env_get_cpu(env);
3116    CPUState *cs = CPU(cpu);
3117    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3118
3119    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
3120}
3121
3122static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3123                                   uint64_t value)
3124{
3125    ARMCPU *cpu = arm_env_get_cpu(env);
3126    CPUState *cs = CPU(cpu);
3127    bool sec = arm_is_secure_below_el3(env);
3128    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3129
3130    if (sec) {
3131        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3132                                                 ARMMMUIdxBit_S1SE1 |
3133                                                 ARMMMUIdxBit_S1SE0);
3134    } else {
3135        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3136                                                 ARMMMUIdxBit_S12NSE1 |
3137                                                 ARMMMUIdxBit_S12NSE0);
3138    }
3139}
3140
3141static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3142                                   uint64_t value)
3143{
3144    CPUState *cs = ENV_GET_CPU(env);
3145    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3146
3147    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3148                                             ARMMMUIdxBit_S1E2);
3149}
3150
3151static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3152                                   uint64_t value)
3153{
3154    CPUState *cs = ENV_GET_CPU(env);
3155    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3156
3157    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3158                                             ARMMMUIdxBit_S1E3);
3159}
3160
3161static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3162                                    uint64_t value)
3163{
3164    /* Invalidate by IPA. This has to invalidate any structures that
3165     * contain only stage 2 translation information, but does not need
3166     * to apply to structures that contain combined stage 1 and stage 2
3167     * translation information.
3168     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
3169     */
3170    ARMCPU *cpu = arm_env_get_cpu(env);
3171    CPUState *cs = CPU(cpu);
3172    uint64_t pageaddr;
3173
3174    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3175        return;
3176    }
3177
3178    pageaddr = sextract64(value << 12, 0, 48);
3179
3180    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
3181}
3182
3183static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3184                                      uint64_t value)
3185{
3186    CPUState *cs = ENV_GET_CPU(env);
3187    uint64_t pageaddr;
3188
3189    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
3190        return;
3191    }
3192
3193    pageaddr = sextract64(value << 12, 0, 48);
3194
3195    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3196                                             ARMMMUIdxBit_S2NS);
3197}
3198
3199static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
3200                                      bool isread)
3201{
3202    /* We don't implement EL2, so the only control on DC ZVA is the
3203     * bit in the SCTLR which can prohibit access for EL0.
3204     */
3205    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
3206        return CP_ACCESS_TRAP;
3207    }
3208    return CP_ACCESS_OK;
3209}
3210
3211static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
3212{
3213    ARMCPU *cpu = arm_env_get_cpu(env);
3214    int dzp_bit = 1 << 4;
3215
3216    /* DZP indicates whether DC ZVA access is allowed */
3217    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
3218        dzp_bit = 0;
3219    }
3220    return cpu->dcz_blocksize | dzp_bit;
3221}
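
/* Worked example: dcz_blocksize is log2 of the block size in words, so
 * the value 4 used by QEMU's AArch64 CPU models means 2^4 * 4 = 64 bytes.
 * An EL0 reader then sees DCZID_EL0 == 0x04 while DC ZVA is permitted, or
 * 0x14 (DZP, bit 4, set) while SCTLR_EL1.DZE forbids it.
 */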
3222
3223static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
3224                                    bool isread)
3225{
3226    if (!(env->pstate & PSTATE_SP)) {
3227        /* Access to SP_EL0 is undefined if it's being used as
3228         * the stack pointer.
3229         */
3230        return CP_ACCESS_TRAP_UNCATEGORIZED;
3231    }
3232    return CP_ACCESS_OK;
3233}
3234
3235static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
3236{
3237    return env->pstate & PSTATE_SP;
3238}
3239
3240static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
3241{
3242    update_spsel(env, val);
3243}
3244
3245static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3246                        uint64_t value)
3247{
3248    ARMCPU *cpu = arm_env_get_cpu(env);
3249
3250    if (raw_read(env, ri) == value) {
3251        /* Skip the TLB flush if nothing actually changed; Linux likes
3252         * to do a lot of pointless SCTLR writes.
3253         */
3254        return;
3255    }
3256
3257    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
3258        /* M bit is RAZ/WI for PMSA with no MPU implemented */
3259        value &= ~SCTLR_M;
3260    }
3261
3262    raw_write(env, ri, value);
3263    /* ??? Lots of these bits are not implemented.  */
3264    /* This may enable/disable the MMU, so do a TLB flush.  */
3265    tlb_flush(CPU(cpu));
3266}
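
/* Example: a guest sequence such as
 *
 *     mrs x0, sctlr_el1
 *     orr x0, x0, #1        // set SCTLR_EL1.M: enable the MMU
 *     msr sctlr_el1, x0
 *     isb
 *
 * changes how every VA translates, hence the unconditional tlb_flush()
 * once we know the value really changed.
 */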
3267
3268static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
3269                                     bool isread)
3270{
3271    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
3272        return CP_ACCESS_TRAP_FP_EL2;
3273    }
3274    if (env->cp15.cptr_el[3] & CPTR_TFP) {
3275        return CP_ACCESS_TRAP_FP_EL3;
3276    }
3277    return CP_ACCESS_OK;
3278}
3279
3280static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3281                       uint64_t value)
3282{
3283    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
3284}
3285
3286static const ARMCPRegInfo v8_cp_reginfo[] = {
3287    /* Minimal set of EL0-visible registers. This will need to be expanded
3288     * significantly for system emulation of AArch64 CPUs.
3289     */
3290    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
3291      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
3292      .access = PL0_RW, .type = ARM_CP_NZCV },
3293    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
3294      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
3295      .type = ARM_CP_NO_RAW,
3296      .access = PL0_RW, .accessfn = aa64_daif_access,
3297      .fieldoffset = offsetof(CPUARMState, daif),
3298      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
3299    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
3300      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
3301      .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
3302    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
3303      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
3304      .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
3305    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
3306      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
3307      .access = PL0_R, .type = ARM_CP_NO_RAW,
3308      .readfn = aa64_dczid_read },
3309    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
3310      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
3311      .access = PL0_W, .type = ARM_CP_DC_ZVA,
3312#ifndef CONFIG_USER_ONLY
3313      /* Avoid overhead of an access check that always passes in user-mode */
3314      .accessfn = aa64_zva_access,
3315#endif
3316    },
3317    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
3318      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
3319      .access = PL1_R, .type = ARM_CP_CURRENTEL },
3320    /* Cache ops: all NOPs since we don't emulate caches */
3321    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
3322      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3323      .access = PL1_W, .type = ARM_CP_NOP },
3324    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
3325      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3326      .access = PL1_W, .type = ARM_CP_NOP },
3327    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
3328      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
3329      .access = PL0_W, .type = ARM_CP_NOP,
3330      .accessfn = aa64_cacheop_access },
3331    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
3332      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3333      .access = PL1_W, .type = ARM_CP_NOP },
3334    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
3335      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3336      .access = PL1_W, .type = ARM_CP_NOP },
3337    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
3338      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
3339      .access = PL0_W, .type = ARM_CP_NOP,
3340      .accessfn = aa64_cacheop_access },
3341    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
3342      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3343      .access = PL1_W, .type = ARM_CP_NOP },
3344    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
3345      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
3346      .access = PL0_W, .type = ARM_CP_NOP,
3347      .accessfn = aa64_cacheop_access },
3348    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
3349      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
3350      .access = PL0_W, .type = ARM_CP_NOP,
3351      .accessfn = aa64_cacheop_access },
3352    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
3353      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3354      .access = PL1_W, .type = ARM_CP_NOP },
3355    /* TLBI operations */
3356    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
3357      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
3358      .access = PL1_W, .type = ARM_CP_NO_RAW,
3359      .writefn = tlbi_aa64_vmalle1is_write },
3360    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
3361      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
3362      .access = PL1_W, .type = ARM_CP_NO_RAW,
3363      .writefn = tlbi_aa64_vae1is_write },
3364    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
3365      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
3366      .access = PL1_W, .type = ARM_CP_NO_RAW,
3367      .writefn = tlbi_aa64_vmalle1is_write },
3368    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
3369      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
3370      .access = PL1_W, .type = ARM_CP_NO_RAW,
3371      .writefn = tlbi_aa64_vae1is_write },
3372    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
3373      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3374      .access = PL1_W, .type = ARM_CP_NO_RAW,
3375      .writefn = tlbi_aa64_vae1is_write },
3376    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
3377      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3378      .access = PL1_W, .type = ARM_CP_NO_RAW,
3379      .writefn = tlbi_aa64_vae1is_write },
3380    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
3381      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
3382      .access = PL1_W, .type = ARM_CP_NO_RAW,
3383      .writefn = tlbi_aa64_vmalle1_write },
3384    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
3385      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
3386      .access = PL1_W, .type = ARM_CP_NO_RAW,
3387      .writefn = tlbi_aa64_vae1_write },
3388    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
3389      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
3390      .access = PL1_W, .type = ARM_CP_NO_RAW,
3391      .writefn = tlbi_aa64_vmalle1_write },
3392    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
3393      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
3394      .access = PL1_W, .type = ARM_CP_NO_RAW,
3395      .writefn = tlbi_aa64_vae1_write },
3396    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
3397      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3398      .access = PL1_W, .type = ARM_CP_NO_RAW,
3399      .writefn = tlbi_aa64_vae1_write },
3400    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
3401      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3402      .access = PL1_W, .type = ARM_CP_NO_RAW,
3403      .writefn = tlbi_aa64_vae1_write },
3404    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
3405      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3406      .access = PL2_W, .type = ARM_CP_NO_RAW,
3407      .writefn = tlbi_aa64_ipas2e1is_write },
3408    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
3409      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3410      .access = PL2_W, .type = ARM_CP_NO_RAW,
3411      .writefn = tlbi_aa64_ipas2e1is_write },
3412    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
3413      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3414      .access = PL2_W, .type = ARM_CP_NO_RAW,
3415      .writefn = tlbi_aa64_alle1is_write },
3416    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
3417      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
3418      .access = PL2_W, .type = ARM_CP_NO_RAW,
3419      .writefn = tlbi_aa64_alle1is_write },
3420    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
3421      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3422      .access = PL2_W, .type = ARM_CP_NO_RAW,
3423      .writefn = tlbi_aa64_ipas2e1_write },
3424    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
3425      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3426      .access = PL2_W, .type = ARM_CP_NO_RAW,
3427      .writefn = tlbi_aa64_ipas2e1_write },
3428    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
3429      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3430      .access = PL2_W, .type = ARM_CP_NO_RAW,
3431      .writefn = tlbi_aa64_alle1_write },
3432    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
3433      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
3434      .access = PL2_W, .type = ARM_CP_NO_RAW,
3435      .writefn = tlbi_aa64_alle1_write },
3436#ifndef CONFIG_USER_ONLY
3437    /* 64 bit address translation operations */
3438    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
3439      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
3440      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3441    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
3442      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
3443      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3444    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
3445      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
3446      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3447    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
3448      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
3449      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3450    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
3451      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
3452      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3453    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
3454      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
3455      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3456    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
3457      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
3458      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3459    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
3460      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
3461      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3462    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
3463    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
3464      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
3465      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3466    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
3467      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
3468      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3469    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
3470      .type = ARM_CP_ALIAS,
3471      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
3472      .access = PL1_RW, .resetvalue = 0,
3473      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
3474      .writefn = par_write },
3475#endif
3476    /* TLB invalidate last level of translation table walk */
3477    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
3478      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
3479    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
3480      .type = ARM_CP_NO_RAW, .access = PL1_W,
3481      .writefn = tlbimvaa_is_write },
3482    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
3483      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
3484    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
3485      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
3486    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3487      .type = ARM_CP_NO_RAW, .access = PL2_W,
3488      .writefn = tlbimva_hyp_write },
3489    { .name = "TLBIMVALHIS",
3490      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3491      .type = ARM_CP_NO_RAW, .access = PL2_W,
3492      .writefn = tlbimva_hyp_is_write },
3493    { .name = "TLBIIPAS2",
3494      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
3495      .type = ARM_CP_NO_RAW, .access = PL2_W,
3496      .writefn = tlbiipas2_write },
3497    { .name = "TLBIIPAS2IS",
3498      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
3499      .type = ARM_CP_NO_RAW, .access = PL2_W,
3500      .writefn = tlbiipas2_is_write },
3501    { .name = "TLBIIPAS2L",
3502      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
3503      .type = ARM_CP_NO_RAW, .access = PL2_W,
3504      .writefn = tlbiipas2_write },
3505    { .name = "TLBIIPAS2LIS",
3506      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
3507      .type = ARM_CP_NO_RAW, .access = PL2_W,
3508      .writefn = tlbiipas2_is_write },
3509    /* 32 bit cache operations */
3510    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
3511      .type = ARM_CP_NOP, .access = PL1_W },
3512    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
3513      .type = ARM_CP_NOP, .access = PL1_W },
3514    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
3515      .type = ARM_CP_NOP, .access = PL1_W },
3516    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
3517      .type = ARM_CP_NOP, .access = PL1_W },
3518    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
3519      .type = ARM_CP_NOP, .access = PL1_W },
3520    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
3521      .type = ARM_CP_NOP, .access = PL1_W },
3522    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
3523      .type = ARM_CP_NOP, .access = PL1_W },
3524    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
3525      .type = ARM_CP_NOP, .access = PL1_W },
3526    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
3527      .type = ARM_CP_NOP, .access = PL1_W },
3528    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
3529      .type = ARM_CP_NOP, .access = PL1_W },
3530    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
3531      .type = ARM_CP_NOP, .access = PL1_W },
3532    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
3533      .type = ARM_CP_NOP, .access = PL1_W },
3534    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
3535      .type = ARM_CP_NOP, .access = PL1_W },
3536    /* MMU Domain access control / MPU write buffer control */
3537    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
3538      .access = PL1_RW, .resetvalue = 0,
3539      .writefn = dacr_write, .raw_writefn = raw_write,
3540      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
3541                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
3542    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
3543      .type = ARM_CP_ALIAS,
3544      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
3545      .access = PL1_RW,
3546      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
3547    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
3548      .type = ARM_CP_ALIAS,
3549      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
3550      .access = PL1_RW,
3551      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
3552    /* We rely on the access checks not allowing the guest to write to the
3553     * state field when SPSel indicates that it's being used as the stack
3554     * pointer.
3555     */
3556    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
3557      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
3558      .access = PL1_RW, .accessfn = sp_el0_access,
3559      .type = ARM_CP_ALIAS,
3560      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
3561    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
3562      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
3563      .access = PL2_RW, .type = ARM_CP_ALIAS,
3564      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
3565    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
3566      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
3567      .type = ARM_CP_NO_RAW,
3568      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
3569    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
3570      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
3571      .type = ARM_CP_ALIAS,
3572      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
3573      .access = PL2_RW, .accessfn = fpexc32_access },
3574    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
3575      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
3576      .access = PL2_RW, .resetvalue = 0,
3577      .writefn = dacr_write, .raw_writefn = raw_write,
3578      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
3579    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
3580      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
3581      .access = PL2_RW, .resetvalue = 0,
3582      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
3583    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
3584      .type = ARM_CP_ALIAS,
3585      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
3586      .access = PL2_RW,
3587      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
3588    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
3589      .type = ARM_CP_ALIAS,
3590      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
3591      .access = PL2_RW,
3592      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
3593    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
3594      .type = ARM_CP_ALIAS,
3595      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
3596      .access = PL2_RW,
3597      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
3598    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
3599      .type = ARM_CP_ALIAS,
3600      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
3601      .access = PL2_RW,
3602      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
3603    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
3604      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
3605      .resetvalue = 0,
3606      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
3607    { .name = "SDCR", .type = ARM_CP_ALIAS,
3608      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
3609      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3610      .writefn = sdcr_write,
3611      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
3612    REGINFO_SENTINEL
3613};
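
/* This table is wired up by register_cp_regs_for_features(), broadly:
 *
 *     if (arm_feature(env, ARM_FEATURE_V8)) {
 *         define_arm_cp_regs(cpu, v8_cp_reginfo);
 *     }
 *
 * (a sketch only; the real function gates many other feature lists too).
 */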
3614
3615/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
3616static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
3617    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3618      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3619      .access = PL2_RW,
3620      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3621    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3622      .type = ARM_CP_NO_RAW,
3623      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3624      .access = PL2_RW,
3625      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
3626    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3627      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3628      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3629    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3630      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3631      .access = PL2_RW, .type = ARM_CP_CONST,
3632      .resetvalue = 0 },
3633    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3634      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3635      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3636    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3637      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3638      .access = PL2_RW, .type = ARM_CP_CONST,
3639      .resetvalue = 0 },
3640    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3641      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3642      .access = PL2_RW, .type = ARM_CP_CONST,
3643      .resetvalue = 0 },
3644    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3645      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3646      .access = PL2_RW, .type = ARM_CP_CONST,
3647      .resetvalue = 0 },
3648    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3649      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3650      .access = PL2_RW, .type = ARM_CP_CONST,
3651      .resetvalue = 0 },
3652    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3653      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3654      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3655    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
3656      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3657      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3658      .type = ARM_CP_CONST, .resetvalue = 0 },
3659    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3660      .cp = 15, .opc1 = 6, .crm = 2,
3661      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3662      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
3663    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3664      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3665      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3666    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3667      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3668      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3669    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3670      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3671      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3672    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3673      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3674      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3675    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3676      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3677      .resetvalue = 0 },
3678    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3679      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3680      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3681    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3682      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3683      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3684    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3685      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3686      .resetvalue = 0 },
3687    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3688      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3689      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3690    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3691      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
3692      .resetvalue = 0 },
3693    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3694      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3695      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3696    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3697      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3698      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3699    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3700      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3701      .access = PL2_RW, .accessfn = access_tda,
3702      .type = ARM_CP_CONST, .resetvalue = 0 },
3703    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
3704      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3705      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
3706      .type = ARM_CP_CONST, .resetvalue = 0 },
3707    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
3708      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3709      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
3710    REGINFO_SENTINEL
3711};
3712
3713static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3714{
3715    ARMCPU *cpu = arm_env_get_cpu(env);
3716    uint64_t valid_mask = HCR_MASK;
3717
3718    if (arm_feature(env, ARM_FEATURE_EL3)) {
3719        valid_mask &= ~HCR_HCD;
3720    } else {
3721        valid_mask &= ~HCR_TSC;
3722    }
3723
3724    /* Clear RES0 bits.  */
3725    value &= valid_mask;
3726
3727    /* These bits change the MMU setup:
3728     * HCR_VM enables stage 2 translation
3729     * HCR_PTW forbids certain page-table setups
3730     * HCR_DC disables stage 1 and enables stage 2 translation
3731     */
3732    if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
3733        tlb_flush(CPU(cpu));
3734    }
3735    raw_write(env, ri, value);
3736}
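
/* Worked example: on a CPU with EL3, HCR_EL2.HCD is RES0, so a write of
 * (HCR_VM | HCR_HCD) stores only HCR_VM; and because HCR_VM toggles
 * stage 2 translation, the write also triggers a full tlb_flush().
 */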
3737
3738static const ARMCPRegInfo el2_cp_reginfo[] = {
3739    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
3740      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
3741      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
3742      .writefn = hcr_write },
3743    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
3744      .type = ARM_CP_ALIAS,
3745      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
3746      .access = PL2_RW,
3747      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
3748    { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
3749      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
3750      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
3751    { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
3752      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
3753      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
3754    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
3755      .type = ARM_CP_ALIAS,
3756      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
3757      .access = PL2_RW,
3758      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
3759    { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
3760      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
3761      .access = PL2_RW, .writefn = vbar_write,
3762      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
3763      .resetvalue = 0 },
3764    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
3765      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
3766      .access = PL3_RW, .type = ARM_CP_ALIAS,
3767      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
3768    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
3769      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
3770      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
3771      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
3772    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
3773      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
3774      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
3775      .resetvalue = 0 },
3776    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3777      .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
3778      .access = PL2_RW, .type = ARM_CP_ALIAS,
3779      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
3780    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
3781      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
3782      .access = PL2_RW, .type = ARM_CP_CONST,
3783      .resetvalue = 0 },
3784    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
3785    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
3786      .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
3787      .access = PL2_RW, .type = ARM_CP_CONST,
3788      .resetvalue = 0 },
3789    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
3790      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
3791      .access = PL2_RW, .type = ARM_CP_CONST,
3792      .resetvalue = 0 },
3793    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
3794      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
3795      .access = PL2_RW, .type = ARM_CP_CONST,
3796      .resetvalue = 0 },
3797    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
3798      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
3799      .access = PL2_RW,
3800      /* no .writefn needed as this can't cause an ASID change;
3801       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
3802       */
3803      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
3804    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
3805      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3806      .type = ARM_CP_ALIAS,
3807      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3808      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3809    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
3810      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
3811      .access = PL2_RW,
3812      /* no .writefn needed as this can't cause an ASID change;
3813       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
3814       */
3815      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
3816    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
3817      .cp = 15, .opc1 = 6, .crm = 2,
3818      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3819      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3820      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
3821      .writefn = vttbr_write },
3822    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
3823      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
3824      .access = PL2_RW, .writefn = vttbr_write,
3825      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
3826    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
3827      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
3828      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
3829      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
3830    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
3831      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
3832      .access = PL2_RW, .resetvalue = 0,
3833      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
3834    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
3835      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
3836      .access = PL2_RW, .resetvalue = 0,
3837      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3838    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
3839      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3840      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
3841    { .name = "TLBIALLNSNH",
3842      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
3843      .type = ARM_CP_NO_RAW, .access = PL2_W,
3844      .writefn = tlbiall_nsnh_write },
3845    { .name = "TLBIALLNSNHIS",
3846      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
3847      .type = ARM_CP_NO_RAW, .access = PL2_W,
3848      .writefn = tlbiall_nsnh_is_write },
3849    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
3850      .type = ARM_CP_NO_RAW, .access = PL2_W,
3851      .writefn = tlbiall_hyp_write },
3852    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
3853      .type = ARM_CP_NO_RAW, .access = PL2_W,
3854      .writefn = tlbiall_hyp_is_write },
3855    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
3856      .type = ARM_CP_NO_RAW, .access = PL2_W,
3857      .writefn = tlbimva_hyp_write },
3858    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
3859      .type = ARM_CP_NO_RAW, .access = PL2_W,
3860      .writefn = tlbimva_hyp_is_write },
3861    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
3862      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
3863      .type = ARM_CP_NO_RAW, .access = PL2_W,
3864      .writefn = tlbi_aa64_alle2_write },
3865    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
3866      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
3867      .type = ARM_CP_NO_RAW, .access = PL2_W,
3868      .writefn = tlbi_aa64_vae2_write },
3869    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
3870      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
3871      .access = PL2_W, .type = ARM_CP_NO_RAW,
3872      .writefn = tlbi_aa64_vae2_write },
3873    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
3874      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
3875      .access = PL2_W, .type = ARM_CP_NO_RAW,
3876      .writefn = tlbi_aa64_alle2is_write },
3877    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
3878      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
3879      .type = ARM_CP_NO_RAW, .access = PL2_W,
3880      .writefn = tlbi_aa64_vae2is_write },
3881    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
3882      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
3883      .access = PL2_W, .type = ARM_CP_NO_RAW,
3884      .writefn = tlbi_aa64_vae2is_write },
3885#ifndef CONFIG_USER_ONLY
3886    /* Unlike the other EL2-related AT operations, these must
3887     * UNDEF from EL3 if EL2 is not implemented, which is why we
3888     * define them here rather than with the rest of the AT ops.
3889     */
3890    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
3891      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3892      .access = PL2_W, .accessfn = at_s1e2_access,
3893      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3894    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
3895      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3896      .access = PL2_W, .accessfn = at_s1e2_access,
3897      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
3898    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
3899     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
3900     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
3901     * to behave as if SCR.NS was 1.
3902     */
3903    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
3904      .access = PL2_W,
3905      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3906    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
3907      .access = PL2_W,
3908      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
3909    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
3910      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
3911      /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
3912       * reset values as IMPDEF. We choose to reset to 3 to comply with
3913       * both ARMv7 and ARMv8.
3914       */
3915      .access = PL2_RW, .resetvalue = 3,
3916      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
3917    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
3918      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
3919      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
3920      .writefn = gt_cntvoff_write,
3921      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3922    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
3923      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
3924      .writefn = gt_cntvoff_write,
3925      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
3926    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
3927      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
3928      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3929      .type = ARM_CP_IO, .access = PL2_RW,
3930      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
3931    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
3932      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
3933      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
3934      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
3935    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
3936      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
3937      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
3938      .resetfn = gt_hyp_timer_reset,
3939      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
3940    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
3941      .type = ARM_CP_IO,
3942      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
3943      .access = PL2_RW,
3944      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
3945      .resetvalue = 0,
3946      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
3947#endif
3948    /* The only field of MDCR_EL2 that has a defined architectural reset value
3949     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
3950     * don't implement any PMU event counters, so using zero as a reset
3951     * value for MDCR_EL2 is okay.
3952     */
3953    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
3954      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
3955      .access = PL2_RW, .resetvalue = 0,
3956      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
3957    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
3958      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3959      .access = PL2_RW, .accessfn = access_el3_aa32ns,
3960      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
3961    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
3962      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
3963      .access = PL2_RW,
3964      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
3965    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
3966      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
3967      .access = PL2_RW,
3968      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
3969    REGINFO_SENTINEL
3970};
3971
3972static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
3973                                   bool isread)
3974{
3975    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
3976     * At Secure EL1 it traps to EL3.
3977     */
3978    if (arm_current_el(env) == 3) {
3979        return CP_ACCESS_OK;
3980    }
3981    if (arm_is_secure_below_el3(env)) {
3982        return CP_ACCESS_TRAP_EL3;
3983    }
3984    /* Accesses from NS EL1 and NS EL2 are UNDEF for writes but reads are allowed. */
3985    if (isread) {
3986        return CP_ACCESS_OK;
3987    }
3988    return CP_ACCESS_TRAP_UNCATEGORIZED;
3989}
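
/* Summary of the outcomes above:
 *
 *     EL3                     read/write  -> CP_ACCESS_OK
 *     Secure EL1              read/write  -> trap to EL3
 *     Non-secure EL1 or EL2   read        -> CP_ACCESS_OK
 *     Non-secure EL1 or EL2   write       -> UNDEF (uncategorized)
 */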
3990
3991static const ARMCPRegInfo el3_cp_reginfo[] = {
3992    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
3993      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
3994      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
3995      .resetvalue = 0, .writefn = scr_write },
3996    { .name = "SCR",  .type = ARM_CP_ALIAS,
3997      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
3998      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
3999      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
4000      .writefn = scr_write },
4001    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
4002      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
4003      .access = PL3_RW, .resetvalue = 0,
4004      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
4005    { .name = "SDER",
4006      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
4007      .access = PL3_RW, .resetvalue = 0,
4008      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
4009    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4010      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4011      .writefn = vbar_write, .resetvalue = 0,
4012      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
4013    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
4014      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
4015      .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
4016      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
4017    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
4018      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
4019      .access = PL3_RW,
4020      /* no .writefn needed as this can't cause an ASID change;
4021       * we must provide a .raw_writefn and .resetfn because we handle
4022       * reset and migration for the AArch32 TTBCR(S), which might be
4023       * using mask and base_mask.
4024       */
4025      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
4026      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
4027    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
4028      .type = ARM_CP_ALIAS,
4029      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
4030      .access = PL3_RW,
4031      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
4032    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
4033      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
4034      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
4035    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
4036      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
4037      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
4038    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
4039      .type = ARM_CP_ALIAS,
4040      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
4041      .access = PL3_RW,
4042      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
4043    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
4044      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
4045      .access = PL3_RW, .writefn = vbar_write,
4046      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
4047      .resetvalue = 0 },
4048    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
4049      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
4050      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
4051      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
4052    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
4053      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
4054      .access = PL3_RW, .resetvalue = 0,
4055      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
4056    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
4057      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
4058      .access = PL3_RW, .type = ARM_CP_CONST,
4059      .resetvalue = 0 },
4060    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
4061      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
4062      .access = PL3_RW, .type = ARM_CP_CONST,
4063      .resetvalue = 0 },
4064    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
4065      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
4066      .access = PL3_RW, .type = ARM_CP_CONST,
4067      .resetvalue = 0 },
4068    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
4069      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
4070      .access = PL3_W, .type = ARM_CP_NO_RAW,
4071      .writefn = tlbi_aa64_alle3is_write },
4072    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
4073      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
4074      .access = PL3_W, .type = ARM_CP_NO_RAW,
4075      .writefn = tlbi_aa64_vae3is_write },
4076    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
4077      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
4078      .access = PL3_W, .type = ARM_CP_NO_RAW,
4079      .writefn = tlbi_aa64_vae3is_write },
4080    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
4081      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
4082      .access = PL3_W, .type = ARM_CP_NO_RAW,
4083      .writefn = tlbi_aa64_alle3_write },
4084    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
4085      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
4086      .access = PL3_W, .type = ARM_CP_NO_RAW,
4087      .writefn = tlbi_aa64_vae3_write },
4088    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
4089      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
4090      .access = PL3_W, .type = ARM_CP_NO_RAW,
4091      .writefn = tlbi_aa64_vae3_write },
4092    REGINFO_SENTINEL
4093};
4094
4095static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4096                                     bool isread)
4097{
4098    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
4099     * but the AArch32 CTR has its own reginfo struct)
4100     */
4101    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
4102        return CP_ACCESS_TRAP;
4103    }
4104    return CP_ACCESS_OK;
4105}
4106
4107static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4108                        uint64_t value)
4109{
4110    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
4111     * read via a bit in OSLSR_EL1.
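         * Architecturally, the AArch32 OSLAR only locks when the key value
         * 0xC5ACCE55 is written, while AArch64 OSLAR_EL1 simply takes bit 0;
         * the resulting lock state appears in OSLSR_EL1 bit 1 (OSLK).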
4112     */
4113    int oslock;
4114
4115    if (ri->state == ARM_CP_STATE_AA32) {
4116        oslock = (value == 0xC5ACCE55);
4117    } else {
4118        oslock = value & 1;
4119    }
4120
4121    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
4122}
4123
4124static const ARMCPRegInfo debug_cp_reginfo[] = {
4125    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
4126     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
4127     * unlike DBGDRAR it is never accessible from EL0.
4128     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
4129     * accessor.
4130     */
4131    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
4132      .access = PL0_R, .accessfn = access_tdra,
4133      .type = ARM_CP_CONST, .resetvalue = 0 },
4134    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
4135      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
4136      .access = PL1_R, .accessfn = access_tdra,
4137      .type = ARM_CP_CONST, .resetvalue = 0 },
4138    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
4139      .access = PL0_R, .accessfn = access_tdra,
4140      .type = ARM_CP_CONST, .resetvalue = 0 },
4141    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
4142    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
4143      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4144      .access = PL1_RW, .accessfn = access_tda,
4145      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
4146      .resetvalue = 0 },
4147    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
4148     * We don't implement the configurable EL0 access.
4149     */
4150    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
4151      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4152      .type = ARM_CP_ALIAS,
4153      .access = PL1_R, .accessfn = access_tda,
4154      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
4155    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
4156      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
4157      .access = PL1_W, .type = ARM_CP_NO_RAW,
4158      .accessfn = access_tdosa,
4159      .writefn = oslar_write },
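        /* The reset value of 10 (0b1010) sets OSLSR_EL1.OSLM[1] and
         * OSLSR_EL1.OSLK, i.e. the OS lock is implemented and starts out set.
         */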
4160    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
4161      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
4162      .access = PL1_R, .resetvalue = 10,
4163      .accessfn = access_tdosa,
4164      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
4165    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
4166    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
4167      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
4168      .access = PL1_RW, .accessfn = access_tdosa,
4169      .type = ARM_CP_NOP },
4170    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
4171     * implement vector catch debug events yet.
4172     */
4173    { .name = "DBGVCR",
4174      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4175      .access = PL1_RW, .accessfn = access_tda,
4176      .type = ARM_CP_NOP },
4177    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
4178     * to save and restore a 32-bit guest's DBGVCR)
4179     */
4180    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
4181      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
4182      .access = PL2_RW, .accessfn = access_tda,
4183      .type = ARM_CP_NOP },
4184    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
4185     * Channel but Linux may try to access this register. The 32-bit
4186     * alias is DBGDCCINT.
4187     */
4188    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
4189      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4190      .access = PL1_RW, .accessfn = access_tda,
4191      .type = ARM_CP_NOP },
4192    REGINFO_SENTINEL
4193};
4194
4195static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
4196    /* 64 bit access versions of the (dummy) debug registers */
4197    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
4198      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4199    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
4200      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
4201    REGINFO_SENTINEL
4202};
4203
4204void hw_watchpoint_update(ARMCPU *cpu, int n)
4205{
4206    CPUARMState *env = &cpu->env;
4207    vaddr len = 0;
4208    vaddr wvr = env->cp15.dbgwvr[n];
4209    uint64_t wcr = env->cp15.dbgwcr[n];
4210    int mask;
4211    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
4212
4213    if (env->cpu_watchpoint[n]) {
4214        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
4215        env->cpu_watchpoint[n] = NULL;
4216    }
4217
4218    if (!extract64(wcr, 0, 1)) {
4219        /* E bit clear : watchpoint disabled */
4220        return;
4221    }
4222
4223    switch (extract64(wcr, 3, 2)) {
4224    case 0:
4225        /* LSC 00 is reserved and must behave as if the wp is disabled */
4226        return;
4227    case 1:
4228        flags |= BP_MEM_READ;
4229        break;
4230    case 2:
4231        flags |= BP_MEM_WRITE;
4232        break;
4233    case 3:
4234        flags |= BP_MEM_ACCESS;
4235        break;
4236    }
4237
4238    /* Attempts to use both MASK and BAS fields simultaneously are
4239     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
4240     * thus generating a watchpoint for every byte in the masked region.
4241     */
4242    mask = extract64(wcr, 24, 4);
4243    if (mask == 1 || mask == 2) {
4244        /* Reserved values of MASK; we must act as if the mask value was
4245         * some non-reserved value, or as if the watchpoint were disabled.
4246         * We choose the latter.
4247         */
4248        return;
4249    } else if (mask) {
4250        /* Watchpoint covers an aligned area up to 2GB in size */
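            /* e.g. MASK == 3 gives len = 8: an 8-byte watch on an 8-aligned WVR */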
4251        len = 1ULL << mask;
4252        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
4253         * whether the watchpoint fires when the unmasked bits match; we opt
4254         * to generate the exceptions.
4255         */
4256        wvr &= ~(len - 1);
4257    } else {
4258        /* Watchpoint covers bytes defined by the byte address select bits */
4259        int bas = extract64(wcr, 5, 8);
4260        int basstart;
4261
4262        if (bas == 0) {
4263            /* This must act as if the watchpoint is disabled */
4264            return;
4265        }
4266
4267        if (extract64(wvr, 2, 1)) {
4268            /* Deprecated case of an address that is only 4-byte aligned.
4269             * BAS[7:4] are ignored, and BAS[3:0] define which bytes to watch.
4270             */
4271            bas &= 0xf;
4272        }
4273        /* The BAS bits are supposed to be programmed to indicate a contiguous
4274         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
4275         * we fire for each byte in the word/doubleword addressed by the WVR.
4276         * We choose to ignore any non-zero bits after the first range of 1s.
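             * For example, BAS = 0b00111100 gives basstart = ctz32(bas) = 2 and
             * len = cto32(bas >> 2) = 4: a 4-byte watchpoint at wvr + 2.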
4277         */
4278        basstart = ctz32(bas);
4279        len = cto32(bas >> basstart);
4280        wvr += basstart;
4281    }
4282
4283    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
4284                          &env->cpu_watchpoint[n]);
4285}
4286
4287void hw_watchpoint_update_all(ARMCPU *cpu)
4288{
4289    int i;
4290    CPUARMState *env = &cpu->env;
4291
4292    /* Completely clear out existing QEMU watchpoints and our array, to
4293     * avoid possible stale entries following migration load.
4294     */
4295    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
4296    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
4297
4298    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
4299        hw_watchpoint_update(cpu, i);
4300    }
4301}
4302
4303static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4304                         uint64_t value)
4305{
4306    ARMCPU *cpu = arm_env_get_cpu(env);
4307    int i = ri->crm; /* the watchpoint number is encoded in CRm */
4308
4309    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
4310     * register reads and behaves as if values written are sign extended.
4311     * Bits [1:0] are RES0.
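         * For example, writing 0x000123456789abcf (bit 48 set) stores
         * 0xffff23456789abcc.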
4312     */
4313    value = sextract64(value, 0, 49) & ~3ULL;
4314
4315    raw_write(env, ri, value);
4316    hw_watchpoint_update(cpu, i);
4317}
4318
4319static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4320                         uint64_t value)
4321{
4322    ARMCPU *cpu = arm_env_get_cpu(env);
4323    int i = ri->crm;
4324
4325    raw_write(env, ri, value);
4326    hw_watchpoint_update(cpu, i);
4327}
4328
4329void hw_breakpoint_update(ARMCPU *cpu, int n)
4330{
4331    CPUARMState *env = &cpu->env;
4332    uint64_t bvr = env->cp15.dbgbvr[n];
4333    uint64_t bcr = env->cp15.dbgbcr[n];
4334    vaddr addr;
4335    int bt;
4336    int flags = BP_CPU;
4337
4338    if (env->cpu_breakpoint[n]) {
4339        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
4340        env->cpu_breakpoint[n] = NULL;
4341    }
4342
4343    if (!extract64(bcr, 0, 1)) {
4344        /* E bit clear : breakpoint disabled */
4345        return;
4346    }
4347
4348    bt = extract64(bcr, 20, 4);
4349
4350    switch (bt) {
4351    case 4: /* unlinked address mismatch (reserved if AArch64) */
4352    case 5: /* linked address mismatch (reserved if AArch64) */
4353        qemu_log_mask(LOG_UNIMP,
4354                      "arm: address mismatch breakpoint types not implemented");
4355        return;
4356    case 0: /* unlinked address match */
4357    case 1: /* linked address match */
4358    {
4359        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
4360         * we behave as if the register was sign extended. Bits [1:0] are
4361         * RES0. The BAS field is used to allow setting breakpoints on 16
4362         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
4363         * a bp will fire if the addresses covered by the bp and the addresses
4364         * covered by the insn overlap but the insn doesn't start at the
4365         * start of the bp address range. We choose to require the insn and
4366         * the bp to have the same address. The constraints on writing to
4367         * BAS enforced in dbgbcr_write mean we have only four cases:
4368         *  0b0000  => no breakpoint
4369         *  0b0011  => breakpoint on addr
4370         *  0b1100  => breakpoint on addr + 2
4371         *  0b1111  => breakpoint on addr
4372         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
4373         */
4374        int bas = extract64(bcr, 5, 4);
4375        addr = sextract64(bvr, 0, 49) & ~3ULL;
4376        if (bas == 0) {
4377            return;
4378        }
4379        if (bas == 0xc) {
4380            addr += 2;
4381        }
4382        break;
4383    }
4384    case 2: /* unlinked context ID match */
4385    case 8: /* unlinked VMID match (reserved if no EL2) */
4386    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
4387        qemu_log_mask(LOG_UNIMP,
4388                      "arm: unlinked context breakpoint types not implemented");
4389        return;
4390    case 9: /* linked VMID match (reserved if no EL2) */
4391    case 11: /* linked context ID and VMID match (reserved if no EL2) */
4392    case 3: /* linked context ID match */
4393    default:
4394        /* We must generate no events for Linked context matches (unless
4395         * they are linked to by some other bp/wp, which is handled in
4396         * updates for the linking bp/wp). We choose to also generate no events
4397         * for reserved values.
4398         */
4399        return;
4400    }
4401
4402    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
4403}
4404
4405void hw_breakpoint_update_all(ARMCPU *cpu)
4406{
4407    int i;
4408    CPUARMState *env = &cpu->env;
4409
4410    /* Completely clear out existing QEMU breakpoints and our array, to
4411     * avoid possible stale entries following migration load.
4412     */
4413    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
4414    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
4415
4416    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
4417        hw_breakpoint_update(cpu, i);
4418    }
4419}
4420
4421static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4422                         uint64_t value)
4423{
4424    ARMCPU *cpu = arm_env_get_cpu(env);
4425    int i = ri->crm;
4426
4427    raw_write(env, ri, value);
4428    hw_breakpoint_update(cpu, i);
4429}
4430
4431static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4432                         uint64_t value)
4433{
4434    ARMCPU *cpu = arm_env_get_cpu(env);
4435    int i = ri->crm;
4436
4437    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
4438     * copy of BAS[0].
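         * E.g. writing BAS = 0b0101 stores BAS = 0b1111, so only the four
         * BAS patterns handled in hw_breakpoint_update can ever be observed.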
4439     */
4440    value = deposit64(value, 6, 1, extract64(value, 5, 1));
4441    value = deposit64(value, 8, 1, extract64(value, 7, 1));
4442
4443    raw_write(env, ri, value);
4444    hw_breakpoint_update(cpu, i);
4445}
4446
4447static void define_debug_regs(ARMCPU *cpu)
4448{
4449    /* Define v7 and v8 architectural debug registers.
4450     * These are just dummy implementations for now.
4451     */
4452    int i;
4453    int wrps, brps, ctx_cmps;
4454    ARMCPRegInfo dbgdidr = {
4455        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
4456        .access = PL0_R, .accessfn = access_tda,
4457        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
4458    };
4459
4460    /* Note that all these register fields hold "number of Xs minus 1". */
4461    brps = extract32(cpu->dbgdidr, 24, 4);
4462    wrps = extract32(cpu->dbgdidr, 28, 4);
4463    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
4464
4465    assert(ctx_cmps <= brps);
4466
4467    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
4468     * of the debug registers such as number of breakpoints;
4469     * check that if they both exist then they agree.
4470     */
4471    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
4472        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
4473        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
4474        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
4475    }
4476
4477    define_one_arm_cp_reg(cpu, &dbgdidr);
4478    define_arm_cp_regs(cpu, debug_cp_reginfo);
4479
4480    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
4481        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
4482    }
4483
4484    for (i = 0; i < brps + 1; i++) {
4485        ARMCPRegInfo dbgregs[] = {
4486            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
4487              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
4488              .access = PL1_RW, .accessfn = access_tda,
4489              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
4490              .writefn = dbgbvr_write, .raw_writefn = raw_write
4491            },
4492            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
4493              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
4494              .access = PL1_RW, .accessfn = access_tda,
4495              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
4496              .writefn = dbgbcr_write, .raw_writefn = raw_write
4497            },
4498            REGINFO_SENTINEL
4499        };
4500        define_arm_cp_regs(cpu, dbgregs);
4501    }
4502
4503    for (i = 0; i < wrps + 1; i++) {
4504        ARMCPRegInfo dbgregs[] = {
4505            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
4506              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
4507              .access = PL1_RW, .accessfn = access_tda,
4508              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
4509              .writefn = dbgwvr_write, .raw_writefn = raw_write
4510            },
4511            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
4512              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
4513              .access = PL1_RW, .accessfn = access_tda,
4514              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
4515              .writefn = dbgwcr_write, .raw_writefn = raw_write
4516            },
4517            REGINFO_SENTINEL
4518        };
4519        define_arm_cp_regs(cpu, dbgregs);
4520    }
4521}
4522
4523void register_cp_regs_for_features(ARMCPU *cpu)
4524{
4525    /* Register all the coprocessor registers based on feature bits */
4526    CPUARMState *env = &cpu->env;
4527    if (arm_feature(env, ARM_FEATURE_M)) {
4528        /* M profile has no coprocessor registers */
4529        return;
4530    }
4531
4532    define_arm_cp_regs(cpu, cp_reginfo);
4533    if (!arm_feature(env, ARM_FEATURE_V8)) {
4534        /* Must go early as it is full of wildcards that may be
4535         * overridden by later definitions.
4536         */
4537        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
4538    }
4539
4540    if (arm_feature(env, ARM_FEATURE_V6)) {
4541        /* The ID registers all have impdef reset values */
4542        ARMCPRegInfo v6_idregs[] = {
4543            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
4544              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
4545              .access = PL1_R, .type = ARM_CP_CONST,
4546              .resetvalue = cpu->id_pfr0 },
4547            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
4548              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
4549              .access = PL1_R, .type = ARM_CP_CONST,
4550              .resetvalue = cpu->id_pfr1 },
4551            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
4552              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
4553              .access = PL1_R, .type = ARM_CP_CONST,
4554              .resetvalue = cpu->id_dfr0 },
4555            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
4556              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
4557              .access = PL1_R, .type = ARM_CP_CONST,
4558              .resetvalue = cpu->id_afr0 },
4559            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
4560              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
4561              .access = PL1_R, .type = ARM_CP_CONST,
4562              .resetvalue = cpu->id_mmfr0 },
4563            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
4564              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
4565              .access = PL1_R, .type = ARM_CP_CONST,
4566              .resetvalue = cpu->id_mmfr1 },
4567            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
4568              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
4569              .access = PL1_R, .type = ARM_CP_CONST,
4570              .resetvalue = cpu->id_mmfr2 },
4571            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
4572              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
4573              .access = PL1_R, .type = ARM_CP_CONST,
4574              .resetvalue = cpu->id_mmfr3 },
4575            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
4576              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
4577              .access = PL1_R, .type = ARM_CP_CONST,
4578              .resetvalue = cpu->id_isar0 },
4579            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
4580              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
4581              .access = PL1_R, .type = ARM_CP_CONST,
4582              .resetvalue = cpu->id_isar1 },
4583            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
4584              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
4585              .access = PL1_R, .type = ARM_CP_CONST,
4586              .resetvalue = cpu->id_isar2 },
4587            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
4588              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
4589              .access = PL1_R, .type = ARM_CP_CONST,
4590              .resetvalue = cpu->id_isar3 },
4591            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
4592              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
4593              .access = PL1_R, .type = ARM_CP_CONST,
4594              .resetvalue = cpu->id_isar4 },
4595            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
4596              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
4597              .access = PL1_R, .type = ARM_CP_CONST,
4598              .resetvalue = cpu->id_isar5 },
4599            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
4600              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
4601              .access = PL1_R, .type = ARM_CP_CONST,
4602              .resetvalue = cpu->id_mmfr4 },
4603            /* 7 is as yet unallocated and must RAZ */
4604            { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH,
4605              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
4606              .access = PL1_R, .type = ARM_CP_CONST,
4607              .resetvalue = 0 },
4608            REGINFO_SENTINEL
4609        };
4610        define_arm_cp_regs(cpu, v6_idregs);
4611        define_arm_cp_regs(cpu, v6_cp_reginfo);
4612    } else {
4613        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
4614    }
4615    if (arm_feature(env, ARM_FEATURE_V6K)) {
4616        define_arm_cp_regs(cpu, v6k_cp_reginfo);
4617    }
4618    if (arm_feature(env, ARM_FEATURE_V7MP) &&
4619        !arm_feature(env, ARM_FEATURE_PMSA)) {
4620        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
4621    }
4622    if (arm_feature(env, ARM_FEATURE_V7)) {
4623        /* v7 performance monitor control register: same implementor
4624         * field as main ID register, and we implement only the cycle
4625         * count register.
4626         */
4627#ifndef CONFIG_USER_ONLY
4628        ARMCPRegInfo pmcr = {
4629            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
4630            .access = PL0_RW,
4631            .type = ARM_CP_IO | ARM_CP_ALIAS,
4632            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
4633            .accessfn = pmreg_access, .writefn = pmcr_write,
4634            .raw_writefn = raw_write,
4635        };
4636        ARMCPRegInfo pmcr64 = {
4637            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
4638            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
4639            .access = PL0_RW, .accessfn = pmreg_access,
4640            .type = ARM_CP_IO,
4641            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
4642            .resetvalue = cpu->midr & 0xff000000,
4643            .writefn = pmcr_write, .raw_writefn = raw_write,
4644        };
4645        define_one_arm_cp_reg(cpu, &pmcr);
4646        define_one_arm_cp_reg(cpu, &pmcr64);
4647#endif
4648        ARMCPRegInfo clidr = {
4649            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
4650            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
4651            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
4652        };
4653        define_one_arm_cp_reg(cpu, &clidr);
4654        define_arm_cp_regs(cpu, v7_cp_reginfo);
4655        define_debug_regs(cpu);
4656    } else {
4657        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
4658    }
4659    if (arm_feature(env, ARM_FEATURE_V8)) {
4660        /* AArch64 ID registers, which all have impdef reset values.
4661         * Note that within the ID register ranges the unused slots
4662         * must all RAZ, not UNDEF; future architecture versions may
4663         * define new registers here.
4664         */
4665        ARMCPRegInfo v8_idregs[] = {
4666            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
4667              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
4668              .access = PL1_R, .type = ARM_CP_CONST,
4669              .resetvalue = cpu->id_aa64pfr0 },
4670            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
4671              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
4672              .access = PL1_R, .type = ARM_CP_CONST,
4673              .resetvalue = cpu->id_aa64pfr1},
4674            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4675              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
4676              .access = PL1_R, .type = ARM_CP_CONST,
4677              .resetvalue = 0 },
4678            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4679              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
4680              .access = PL1_R, .type = ARM_CP_CONST,
4681              .resetvalue = 0 },
4682            { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4683              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
4684              .access = PL1_R, .type = ARM_CP_CONST,
4685              .resetvalue = 0 },
4686            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4687              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
4688              .access = PL1_R, .type = ARM_CP_CONST,
4689              .resetvalue = 0 },
4690            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4691              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
4692              .access = PL1_R, .type = ARM_CP_CONST,
4693              .resetvalue = 0 },
4694            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4695              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
4696              .access = PL1_R, .type = ARM_CP_CONST,
4697              .resetvalue = 0 },
4698            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
4699              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
4700              .access = PL1_R, .type = ARM_CP_CONST,
4701              .resetvalue = cpu->id_aa64dfr0 },
4702            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
4703              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
4704              .access = PL1_R, .type = ARM_CP_CONST,
4705              .resetvalue = cpu->id_aa64dfr1 },
4706            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4707              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
4708              .access = PL1_R, .type = ARM_CP_CONST,
4709              .resetvalue = 0 },
4710            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4711              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
4712              .access = PL1_R, .type = ARM_CP_CONST,
4713              .resetvalue = 0 },
4714            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
4715              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
4716              .access = PL1_R, .type = ARM_CP_CONST,
4717              .resetvalue = cpu->id_aa64afr0 },
4718            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
4719              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
4720              .access = PL1_R, .type = ARM_CP_CONST,
4721              .resetvalue = cpu->id_aa64afr1 },
4722            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4723              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
4724              .access = PL1_R, .type = ARM_CP_CONST,
4725              .resetvalue = 0 },
4726            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4727              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
4728              .access = PL1_R, .type = ARM_CP_CONST,
4729              .resetvalue = 0 },
4730            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
4731              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
4732              .access = PL1_R, .type = ARM_CP_CONST,
4733              .resetvalue = cpu->id_aa64isar0 },
4734            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
4735              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
4736              .access = PL1_R, .type = ARM_CP_CONST,
4737              .resetvalue = cpu->id_aa64isar1 },
4738            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4739              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
4740              .access = PL1_R, .type = ARM_CP_CONST,
4741              .resetvalue = 0 },
4742            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4743              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
4744              .access = PL1_R, .type = ARM_CP_CONST,
4745              .resetvalue = 0 },
4746            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4747              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
4748              .access = PL1_R, .type = ARM_CP_CONST,
4749              .resetvalue = 0 },
4750            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4751              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
4752              .access = PL1_R, .type = ARM_CP_CONST,
4753              .resetvalue = 0 },
4754            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4755              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
4756              .access = PL1_R, .type = ARM_CP_CONST,
4757              .resetvalue = 0 },
4758            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4759              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
4760              .access = PL1_R, .type = ARM_CP_CONST,
4761              .resetvalue = 0 },
4762            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
4763              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
4764              .access = PL1_R, .type = ARM_CP_CONST,
4765              .resetvalue = cpu->id_aa64mmfr0 },
4766            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
4767              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
4768              .access = PL1_R, .type = ARM_CP_CONST,
4769              .resetvalue = cpu->id_aa64mmfr1 },
4770            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4771              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
4772              .access = PL1_R, .type = ARM_CP_CONST,
4773              .resetvalue = 0 },
4774            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4775              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
4776              .access = PL1_R, .type = ARM_CP_CONST,
4777              .resetvalue = 0 },
4778            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4779              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
4780              .access = PL1_R, .type = ARM_CP_CONST,
4781              .resetvalue = 0 },
4782            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4783              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
4784              .access = PL1_R, .type = ARM_CP_CONST,
4785              .resetvalue = 0 },
4786            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4787              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
4788              .access = PL1_R, .type = ARM_CP_CONST,
4789              .resetvalue = 0 },
4790            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4791              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
4792              .access = PL1_R, .type = ARM_CP_CONST,
4793              .resetvalue = 0 },
4794            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
4795              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
4796              .access = PL1_R, .type = ARM_CP_CONST,
4797              .resetvalue = cpu->mvfr0 },
4798            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
4799              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
4800              .access = PL1_R, .type = ARM_CP_CONST,
4801              .resetvalue = cpu->mvfr1 },
4802            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
4803              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
4804              .access = PL1_R, .type = ARM_CP_CONST,
4805              .resetvalue = cpu->mvfr2 },
4806            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4807              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
4808              .access = PL1_R, .type = ARM_CP_CONST,
4809              .resetvalue = 0 },
4810            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4811              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
4812              .access = PL1_R, .type = ARM_CP_CONST,
4813              .resetvalue = 0 },
4814            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4815              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
4816              .access = PL1_R, .type = ARM_CP_CONST,
4817              .resetvalue = 0 },
4818            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4819              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
4820              .access = PL1_R, .type = ARM_CP_CONST,
4821              .resetvalue = 0 },
4822            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
4823              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
4824              .access = PL1_R, .type = ARM_CP_CONST,
4825              .resetvalue = 0 },
4826            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
4827              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
4828              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4829              .resetvalue = cpu->pmceid0 },
4830            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
4831              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
4832              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4833              .resetvalue = cpu->pmceid0 },
4834            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
4835              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
4836              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4837              .resetvalue = cpu->pmceid1 },
4838            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
4839              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
4840              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
4841              .resetvalue = cpu->pmceid1 },
4842            REGINFO_SENTINEL
4843        };
4844        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
4845        if (!arm_feature(env, ARM_FEATURE_EL3) &&
4846            !arm_feature(env, ARM_FEATURE_EL2)) {
4847            ARMCPRegInfo rvbar = {
4848                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
4849                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
4850                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
4851            };
4852            define_one_arm_cp_reg(cpu, &rvbar);
4853        }
4854        define_arm_cp_regs(cpu, v8_idregs);
4855        define_arm_cp_regs(cpu, v8_cp_reginfo);
4856    }
4857    if (arm_feature(env, ARM_FEATURE_EL2)) {
4858        uint64_t vmpidr_def = mpidr_read_val(env);
4859        ARMCPRegInfo vpidr_regs[] = {
4860            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
4861              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4862              .access = PL2_RW, .accessfn = access_el3_aa32ns,
4863              .resetvalue = cpu->midr,
4864              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4865            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
4866              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4867              .access = PL2_RW, .resetvalue = cpu->midr,
4868              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4869            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
4870              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4871              .access = PL2_RW, .accessfn = access_el3_aa32ns,
4872              .resetvalue = vmpidr_def,
4873              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
4874            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
4875              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4876              .access = PL2_RW,
4877              .resetvalue = vmpidr_def,
4878              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
4879            REGINFO_SENTINEL
4880        };
4881        define_arm_cp_regs(cpu, vpidr_regs);
4882        define_arm_cp_regs(cpu, el2_cp_reginfo);
4883        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
4884        if (!arm_feature(env, ARM_FEATURE_EL3)) {
4885            ARMCPRegInfo rvbar = {
4886                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
4887                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
4888                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
4889            };
4890            define_one_arm_cp_reg(cpu, &rvbar);
4891        }
4892    } else {
4893        /* If EL2 is missing but higher ELs are enabled, we need to
4894         * register the no_el2 reginfos.
4895         */
4896        if (arm_feature(env, ARM_FEATURE_EL3)) {
4897            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
4898             * of MIDR_EL1 and MPIDR_EL1.
4899             */
4900            ARMCPRegInfo vpidr_regs[] = {
4901                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4902                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
4903                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4904                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
4905                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
4906                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4907                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
4908                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4909                  .type = ARM_CP_NO_RAW,
4910                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
4911                REGINFO_SENTINEL
4912            };
4913            define_arm_cp_regs(cpu, vpidr_regs);
4914            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
4915        }
4916    }
4917    if (arm_feature(env, ARM_FEATURE_EL3)) {
4918        define_arm_cp_regs(cpu, el3_cp_reginfo);
4919        ARMCPRegInfo el3_regs[] = {
4920            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
4921              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
4922              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
4923            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
4924              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
4925              .access = PL3_RW,
4926              .raw_writefn = raw_write, .writefn = sctlr_write,
4927              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
4928              .resetvalue = cpu->reset_sctlr },
4929            REGINFO_SENTINEL
4930        };
4931
4932        define_arm_cp_regs(cpu, el3_regs);
4933    }
4934    /* The behaviour of NSACR is sufficiently various that we don't
4935     * try to describe it in a single reginfo:
4936     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
4937     *     reads as constant 0xc00 from NS EL1 and NS EL2
4938     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
4939     *  if v7 without EL3, register doesn't exist
4940     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
4941     */
4942    if (arm_feature(env, ARM_FEATURE_EL3)) {
4943        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
4944            ARMCPRegInfo nsacr = {
4945                .name = "NSACR", .type = ARM_CP_CONST,
4946                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4947                .access = PL1_RW, .accessfn = nsacr_access,
4948                .resetvalue = 0xc00
4949            };
4950            define_one_arm_cp_reg(cpu, &nsacr);
4951        } else {
4952            ARMCPRegInfo nsacr = {
4953                .name = "NSACR",
4954                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4955                .access = PL3_RW | PL1_R,
4956                .resetvalue = 0,
4957                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
4958            };
4959            define_one_arm_cp_reg(cpu, &nsacr);
4960        }
4961    } else {
4962        if (arm_feature(env, ARM_FEATURE_V8)) {
4963            ARMCPRegInfo nsacr = {
4964                .name = "NSACR", .type = ARM_CP_CONST,
4965                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
4966                .access = PL1_R,
4967                .resetvalue = 0xc00
4968            };
4969            define_one_arm_cp_reg(cpu, &nsacr);
4970        }
4971    }
4972
4973    if (arm_feature(env, ARM_FEATURE_PMSA)) {
4974        if (arm_feature(env, ARM_FEATURE_V6)) {
4975            /* PMSAv6 not implemented */
4976            assert(arm_feature(env, ARM_FEATURE_V7));
4977            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
4978            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
4979        } else {
4980            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
4981        }
4982    } else {
4983        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
4984        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
4985    }
4986    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
4987        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
4988    }
4989    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
4990        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
4991    }
4992    if (arm_feature(env, ARM_FEATURE_VAPA)) {
4993        define_arm_cp_regs(cpu, vapa_cp_reginfo);
4994    }
4995    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
4996        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
4997    }
4998    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
4999        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
5000    }
5001    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
5002        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
5003    }
5004    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
5005        define_arm_cp_regs(cpu, omap_cp_reginfo);
5006    }
5007    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
5008        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
5009    }
5010    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5011        define_arm_cp_regs(cpu, xscale_cp_reginfo);
5012    }
5013    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
5014        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
5015    }
5016    if (arm_feature(env, ARM_FEATURE_LPAE)) {
5017        define_arm_cp_regs(cpu, lpae_cp_reginfo);
5018    }
5019    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
5020     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
5021     * be read-only (ie write causes UNDEF exception).
5022     */
5023    {
5024        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
5025            /* Pre-v8 MIDR space.
5026             * Note that the MIDR isn't a simple constant register because
5027             * of the TI925 behaviour where writes to another register can
5028             * cause the MIDR value to change.
5029             *
5030             * Unimplemented registers in the c15 0 0 0 space default to
5031             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
5032             * and friends override accordingly.
5033             */
5034            { .name = "MIDR",
5035              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
5036              .access = PL1_R, .resetvalue = cpu->midr,
5037              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
5038              .readfn = midr_read,
5039              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
5040              .type = ARM_CP_OVERRIDE },
5041            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
5042            { .name = "DUMMY",
5043              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
5044              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5045            { .name = "DUMMY",
5046              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
5047              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5048            { .name = "DUMMY",
5049              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
5050              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5051            { .name = "DUMMY",
5052              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
5053              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5054            { .name = "DUMMY",
5055              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
5056              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5057            REGINFO_SENTINEL
5058        };
5059        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
5060            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
5061              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
5062              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
5063              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
5064              .readfn = midr_read },
5065            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
5066            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5067              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5068              .access = PL1_R, .resetvalue = cpu->midr },
5069            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
5070              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
5071              .access = PL1_R, .resetvalue = cpu->midr },
5072            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
5073              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
5074              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
5075            REGINFO_SENTINEL
5076        };
5077        ARMCPRegInfo id_cp_reginfo[] = {
5078            /* These are common to v8 and pre-v8 */
5079            { .name = "CTR",
5080              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
5081              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5082            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
5083              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
5084              .access = PL0_R, .accessfn = ctr_el0_access,
5085              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
5086            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
5087            { .name = "TCMTR",
5088              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
5089              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
5090            REGINFO_SENTINEL
5091        };
5092        /* TLBTR is specific to VMSA */
5093        ARMCPRegInfo id_tlbtr_reginfo = {
5094              .name = "TLBTR",
5095              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
5096              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
5097        };
5098        /* MPUIR is specific to PMSA V6+ */
5099        ARMCPRegInfo id_mpuir_reginfo = {
5100              .name = "MPUIR",
5101              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
5102              .access = PL1_R, .type = ARM_CP_CONST,
5103              .resetvalue = cpu->pmsav7_dregion << 8
5104        };
5105        ARMCPRegInfo crn0_wi_reginfo = {
5106            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
5107            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
5108            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
5109        };
5110        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
5111            arm_feature(env, ARM_FEATURE_STRONGARM)) {
5112            ARMCPRegInfo *r;
5113            /* Register the blanket "writes ignored" value first to cover the
5114             * whole space. Then update the specific ID registers to allow write
5115             * access, so that they ignore writes rather than causing them to
5116             * UNDEF.
5117             */
5118            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
5119            for (r = id_pre_v8_midr_cp_reginfo;
5120                 r->type != ARM_CP_SENTINEL; r++) {
5121                r->access = PL1_RW;
5122            }
5123            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
5124                r->access = PL1_RW;
5125            }
5126            id_tlbtr_reginfo.access = PL1_RW;
5127            id_mpuir_reginfo.access = PL1_RW;
5128        }
5129        if (arm_feature(env, ARM_FEATURE_V8)) {
5130            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
5131        } else {
5132            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
5133        }
5134        define_arm_cp_regs(cpu, id_cp_reginfo);
5135        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
5136            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
5137        } else if (arm_feature(env, ARM_FEATURE_V7)) {
5138            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
5139        }
5140    }
5141
5142    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
5143        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
5144    }
5145
5146    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
5147        ARMCPRegInfo auxcr_reginfo[] = {
5148            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
5149              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
5150              .access = PL1_RW, .type = ARM_CP_CONST,
5151              .resetvalue = cpu->reset_auxcr },
5152            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
5153              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
5154              .access = PL2_RW, .type = ARM_CP_CONST,
5155              .resetvalue = 0 },
5156            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
5157              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
5158              .access = PL3_RW, .type = ARM_CP_CONST,
5159              .resetvalue = 0 },
5160            REGINFO_SENTINEL
5161        };
5162        define_arm_cp_regs(cpu, auxcr_reginfo);
5163    }
5164
5165    if (arm_feature(env, ARM_FEATURE_CBAR)) {
5166        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5167            /* The 32-bit view is CBAR[31:18], zeroes, then CBAR[43:32]. */
5168            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
5169                | extract64(cpu->reset_cbar, 32, 12);
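                /* e.g. a 64-bit CBAR of 0x4_0000_0000 reads here as 0x00000004 */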
5170            ARMCPRegInfo cbar_reginfo[] = {
5171                { .name = "CBAR",
5172                  .type = ARM_CP_CONST,
5173                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5174                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
5175                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
5176                  .type = ARM_CP_CONST,
5177                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
5178                  .access = PL1_R, .resetvalue = cbar32 },
5179                REGINFO_SENTINEL
5180            };
5181            /* We don't currently implement a r/w 64 bit CBAR */
5182            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
5183            define_arm_cp_regs(cpu, cbar_reginfo);
5184        } else {
5185            ARMCPRegInfo cbar = {
5186                .name = "CBAR",
5187                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
5188                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
5189                .fieldoffset = offsetof(CPUARMState,
5190                                        cp15.c15_config_base_address)
5191            };
5192            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
5193                cbar.access = PL1_R;
5194                cbar.fieldoffset = 0;
5195                cbar.type = ARM_CP_CONST;
5196            }
5197            define_one_arm_cp_reg(cpu, &cbar);
5198        }
5199    }
5200
5201    if (arm_feature(env, ARM_FEATURE_VBAR)) {
5202        ARMCPRegInfo vbar_cp_reginfo[] = {
5203            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
5204              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
5205              .access = PL1_RW, .writefn = vbar_write,
5206              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
5207                                     offsetof(CPUARMState, cp15.vbar_ns) },
5208              .resetvalue = 0 },
5209            REGINFO_SENTINEL
5210        };
5211        define_arm_cp_regs(cpu, vbar_cp_reginfo);
5212    }
5213
5214    /* Generic registers whose values depend on the implementation */
5215    {
5216        ARMCPRegInfo sctlr = {
5217            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
5218            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5219            .access = PL1_RW,
5220            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
5221                                   offsetof(CPUARMState, cp15.sctlr_ns) },
5222            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
5223            .raw_writefn = raw_write,
5224        };
5225        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5226            /* Normally we would always end the TB on an SCTLR write, but Linux
5227             * arch/arm/mach-pxa/sleep.S expects two instructions following
5228             * an MMU enable to execute from cache.  Imitate this behaviour.
5229             */
5230            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
5231        }
5232        define_one_arm_cp_reg(cpu, &sctlr);
5233    }
5234}
5235
5236ARMCPU *cpu_arm_init(const char *cpu_model)
5237{
5238    return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
5239}
5240
5241void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
5242{
5243    CPUState *cs = CPU(cpu);
5244    CPUARMState *env = &cpu->env;
5245
5246    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5247        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
5248                                 aarch64_fpu_gdb_set_reg,
5249                                 34, "aarch64-fpu.xml", 0);
5250    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
5251        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5252                                 51, "arm-neon.xml", 0);
5253    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
5254        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5255                                 35, "arm-vfp3.xml", 0);
5256    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
5257        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
5258                                 19, "arm-vfp.xml", 0);
5259    }
5260}
5261
5262/* Sort alphabetically by type name, except for "any". */
5263static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
5264{
5265    ObjectClass *class_a = (ObjectClass *)a;
5266    ObjectClass *class_b = (ObjectClass *)b;
5267    const char *name_a, *name_b;
5268
5269    name_a = object_class_get_name(class_a);
5270    name_b = object_class_get_name(class_b);
5271    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
5272        return 1;
5273    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
5274        return -1;
5275    } else {
5276        return strcmp(name_a, name_b);
5277    }
5278}
5279
5280static void arm_cpu_list_entry(gpointer data, gpointer user_data)
5281{
5282    ObjectClass *oc = data;
5283    CPUListState *s = user_data;
5284    const char *typename;
5285    char *name;
5286
5287    typename = object_class_get_name(oc);
5288    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
5289    (*s->cpu_fprintf)(s->file, "  %s\n",
5290                      name);
5291    g_free(name);
5292}
5293
5294void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
5295{
5296    CPUListState s = {
5297        .file = f,
5298        .cpu_fprintf = cpu_fprintf,
5299    };
5300    GSList *list;
5301
5302    list = object_class_get_list(TYPE_ARM_CPU, false);
5303    list = g_slist_sort(list, arm_cpu_list_compare);
5304    (*cpu_fprintf)(f, "Available CPUs:\n");
5305    g_slist_foreach(list, arm_cpu_list_entry, &s);
5306    g_slist_free(list);
5307#ifdef CONFIG_KVM
5308    /* The 'host' CPU type is dynamically registered only if KVM is
5309     * enabled, so we have to special-case it here:
5310     */
5311    (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
5312#endif
5313}
5314
5315static void arm_cpu_add_definition(gpointer data, gpointer user_data)
5316{
5317    ObjectClass *oc = data;
5318    CpuDefinitionInfoList **cpu_list = user_data;
5319    CpuDefinitionInfoList *entry;
5320    CpuDefinitionInfo *info;
5321    const char *typename;
5322
5323    typename = object_class_get_name(oc);
5324    info = g_malloc0(sizeof(*info));
5325    info->name = g_strndup(typename,
5326                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
5327    info->q_typename = g_strdup(typename);
5328
5329    entry = g_malloc0(sizeof(*entry));
5330    entry->value = info;
5331    entry->next = *cpu_list;
5332    *cpu_list = entry;
5333}
5334
5335CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
5336{
5337    CpuDefinitionInfoList *cpu_list = NULL;
5338    GSList *list;
5339
5340    list = object_class_get_list(TYPE_ARM_CPU, false);
5341    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
5342    g_slist_free(list);
5343
5344    return cpu_list;
5345}
5346
5347static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
5348                                   void *opaque, int state, int secstate,
5349                                   int crm, int opc1, int opc2)
5350{
5351    /* Private utility function for define_one_arm_cp_reg_with_opaque():
5352     * add a single reginfo struct to the hash table.
5353     */
5354    uint32_t *key = g_new(uint32_t, 1);
5355    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
5356    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
5357    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
5358
5359    /* Reset the secure state to the specific incoming state.  This is
5360     * necessary as the register may have been defined with both states.
5361     */
5362    r2->secure = secstate;
5363
5364    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5365        /* Register is banked (using both entries in array).
5366         * Overwriting fieldoffset as the array is only used to define
5367         * banked registers but later only fieldoffset is used.
5368         */
5369        r2->fieldoffset = r->bank_fieldoffsets[ns];
5370    }
5371
5372    if (state == ARM_CP_STATE_AA32) {
5373        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
5374            /* If the register is banked then we don't need to migrate or
5375             * reset the 32-bit instance in certain cases:
5376             *
5377             * 1) If the register has both 32-bit and 64-bit instances then we
5378             *    can count on the 64-bit instance taking care of the
5379             *    non-secure bank.
5380             * 2) If ARMv8 is enabled then we can count on a 64-bit version
5381             *    taking care of the secure bank.  This requires that separate
5382             *    32 and 64-bit definitions are provided.
5383             */
5384            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
5385                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
5386                r2->type |= ARM_CP_ALIAS;
5387            }
5388        } else if ((secstate != r->secure) && !ns) {
5389            /* The register is not banked so we only want to allow migration of
5390             * the non-secure instance.
5391             */
5392            r2->type |= ARM_CP_ALIAS;
5393        }
5394
5395        if (r->state == ARM_CP_STATE_BOTH) {
5396            /* We assume it is a cp15 register if the .cp field is left unset.
5397             */
5398            if (r2->cp == 0) {
5399                r2->cp = 15;
5400            }
5401
5402#ifdef HOST_WORDS_BIGENDIAN
5403            if (r2->fieldoffset) {
5404                r2->fieldoffset += sizeof(uint32_t);
5405            }
5406#endif
5407        }
5408    }
5409    if (state == ARM_CP_STATE_AA64) {
5410        /* To allow abbreviation of ARMCPRegInfo
5411         * definitions, we treat cp == 0 as equivalent to
5412         * the value for "standard guest-visible sysreg".
5413         * STATE_BOTH definitions are also always "standard
5414         * sysreg" in their AArch64 view (the .cp value may
5415         * be non-zero for the benefit of the AArch32 view).
5416         */
5417        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
5418            r2->cp = CP_REG_ARM64_SYSREG_CP;
5419        }
5420        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
5421                                  r2->opc0, opc1, opc2);
5422    } else {
5423        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
5424    }
5425    if (opaque) {
5426        r2->opaque = opaque;
5427    }
5428    /* reginfo passed to helpers is correct for the actual access,
5429     * and is never ARM_CP_STATE_BOTH:
5430     */
5431    r2->state = state;
5432    /* Make sure reginfo passed to helpers for wildcarded regs
5433     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
5434     */
5435    r2->crm = crm;
5436    r2->opc1 = opc1;
5437    r2->opc2 = opc2;
5438    /* By convention, for wildcarded registers only the first
5439     * entry is used for migration; the others are marked as
5440     * ALIAS so we don't try to transfer the register
5441     * multiple times. Special registers (i.e. NOP/WFI) are
5442     * never migratable and not even raw-accessible.
5443     */
5444    if ((r->type & ARM_CP_SPECIAL)) {
5445        r2->type |= ARM_CP_NO_RAW;
5446    }
5447    if (((r->crm == CP_ANY) && crm != 0) ||
5448        ((r->opc1 == CP_ANY) && opc1 != 0) ||
5449        ((r->opc2 == CP_ANY) && opc2 != 0)) {
5450        r2->type |= ARM_CP_ALIAS;
5451    }
5452
5453    /* Check that raw accesses are either forbidden or handled. Note that
5454     * we can't assert this earlier because the setup of fieldoffset for
5455     * banked registers has to be done first.
5456     */
5457    if (!(r2->type & ARM_CP_NO_RAW)) {
5458        assert(!raw_accessors_invalid(r2));
5459    }
5460
5461    /* Overriding of an existing definition must be explicitly
5462     * requested.
5463     */
5464    if (!(r->type & ARM_CP_OVERRIDE)) {
5465        ARMCPRegInfo *oldreg;
5466        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
5467        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
5468            fprintf(stderr, "Register redefined: cp=%d %d bit "
5469                    "crn=%d crm=%d opc1=%d opc2=%d, "
5470                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
5471                    r2->crn, r2->crm, r2->opc1, r2->opc2,
5472                    oldreg->name, r2->name);
5473            g_assert_not_reached();
5474        }
5475    }
5476    g_hash_table_insert(cpu->cp_regs, key, r2);
5477}
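
/* Illustrative sketch (not part of QEMU): how a coprocessor register tuple
 * can be packed into the single 32-bit hash key used above. The real layout
 * is defined by ENCODE_CP_REG()/ENCODE_AA64_CP_REG() in cpu.h; the shifts
 * below are illustrative assumptions only, and the block is guarded out of
 * this translation unit so it can be compiled standalone instead.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical packing: each field gets a disjoint bit range. */
static uint32_t encode_cp_reg_demo(int cp, int is64, int ns,
                                   int crn, int crm, int opc1, int opc2)
{
    return ((uint32_t)ns << 27) | (cp << 16) | (is64 << 15) |
           (crn << 11) | (crm << 7) | (opc1 << 3) | opc2;
}

int main(void)
{
    /* Two registers differing only in opc2 must hash to different keys. */
    uint32_t a = encode_cp_reg_demo(15, 0, 1, 1, 0, 0, 0); /* e.g. SCTLR */
    uint32_t b = encode_cp_reg_demo(15, 0, 1, 1, 0, 0, 1); /* e.g. ACTLR */
    assert(a != b);
    printf("key(SCTLR)=%#x key(ACTLR)=%#x\n", a, b);
    return 0;
}
#endif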
5478
5479
5480void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
5481                                       const ARMCPRegInfo *r, void *opaque)
5482{
5483    /* Define implementations of coprocessor registers.
5484     * We store these in a hashtable because typically
5485     * there are fewer than 150 registers in a space which
5486     * is 16*16*16*8*8 = 262144 in size.
5487     * Wildcarding is supported for the crm, opc1 and opc2 fields.
5488     * If a register is defined twice then the second definition is
5489     * used, so this can be used to define some generic registers and
5490     * then override them with implementation specific variations.
5491     * At least one of the original and the second definition should
5492     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
5493     * against accidental use.
5494     *
5495     * The state field defines whether the register is to be
5496     * visible in the AArch32 or AArch64 execution state. If the
5497     * state is set to ARM_CP_STATE_BOTH then we synthesise a
5498     * reginfo structure for the AArch32 view, which sees the lower
5499     * 32 bits of the 64 bit register.
5500     *
5501     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
5502     * be wildcarded. AArch64 registers are always considered to be 64
5503     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
5504     * the register, if any.
5505     */
5506    int crm, opc1, opc2, state;
5507    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
5508    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
5509    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
5510    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
5511    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
5512    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
5513    /* 64 bit registers have only CRm and Opc1 fields */
5514    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
5515    /* op0 only exists in the AArch64 encodings */
5516    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
5517    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
5518    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
5519    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
5520     * encodes a minimum access level for the register. We roll this
5521     * runtime check into our general permission check code, so check
5522     * here that the reginfo's specified permissions are strict enough
5523     * to encompass the generic architectural permission check.
5524     */
5525    if (r->state != ARM_CP_STATE_AA32) {
5526        int mask = 0;
5527        switch (r->opc1) {
5528        case 0: case 1: case 2:
5529            /* min_EL EL1 */
5530            mask = PL1_RW;
5531            break;
5532        case 3:
5533            /* min_EL EL0 */
5534            mask = PL0_RW;
5535            break;
5536        case 4:
5537            /* min_EL EL2 */
5538            mask = PL2_RW;
5539            break;
5540        case 5:
5541            /* unallocated encoding, so not possible */
5542            assert(false);
5543            break;
5544        case 6:
5545            /* min_EL EL3 */
5546            mask = PL3_RW;
5547            break;
5548        case 7:
5549            /* min_EL EL1, secure mode only (we don't check the latter) */
5550            mask = PL1_RW;
5551            break;
5552        default:
5553            /* broken reginfo with out-of-range opc1 */
5554            assert(false);
5555            break;
5556        }
5557        /* assert our permissions are not too lax (stricter is fine) */
5558        assert((r->access & ~mask) == 0);
5559    }
5560
5561    /* Check that the register definition has enough info to handle
5562     * reads and writes if they are permitted.
5563     */
5564    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
5565        if (r->access & PL3_R) {
5566            assert((r->fieldoffset ||
5567                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
5568                   r->readfn);
5569        }
5570        if (r->access & PL3_W) {
5571            assert((r->fieldoffset ||
5572                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
5573                   r->writefn);
5574        }
5575    }
5576    /* Bad type field probably means missing sentinel at end of reg list */
5577    assert(cptype_valid(r->type));
5578    for (crm = crmmin; crm <= crmmax; crm++) {
5579        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
5580            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
5581                for (state = ARM_CP_STATE_AA32;
5582                     state <= ARM_CP_STATE_AA64; state++) {
5583                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
5584                        continue;
5585                    }
5586                    if (state == ARM_CP_STATE_AA32) {
5587                        /* Under AArch32 CP registers can be common
5588                         * (same for secure and non-secure world) or banked.
5589                         */
5590                        switch (r->secure) {
5591                        case ARM_CP_SECSTATE_S:
5592                        case ARM_CP_SECSTATE_NS:
5593                            add_cpreg_to_hashtable(cpu, r, opaque, state,
5594                                                   r->secure, crm, opc1, opc2);
5595                            break;
5596                        default:
5597                            add_cpreg_to_hashtable(cpu, r, opaque, state,
5598                                                   ARM_CP_SECSTATE_S,
5599                                                   crm, opc1, opc2);
5600                            add_cpreg_to_hashtable(cpu, r, opaque, state,
5601                                                   ARM_CP_SECSTATE_NS,
5602                                                   crm, opc1, opc2);
5603                            break;
5604                        }
5605                    } else {
5606                        /* AArch64 registers get mapped to non-secure instance
5607                         * of AArch32 */
5608                        add_cpreg_to_hashtable(cpu, r, opaque, state,
5609                                               ARM_CP_SECSTATE_NS,
5610                                               crm, opc1, opc2);
5611                    }
5612                }
5613            }
5614        }
5615    }
5616}
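
/* Illustrative sketch (not part of QEMU): the wildcard expansion above turns
 * one reginfo into many hash-table entries. A definition with crm, opc1 and
 * opc2 all set to CP_ANY expands to 16 * 8 * 8 = 1024 entries (per state and
 * secstate combination). Standalone illustration, guarded out of the build;
 * DEMO_CP_ANY stands in for QEMU's CP_ANY wildcard value.
 */
#if 0
#include <stdio.h>

#define DEMO_CP_ANY 0xff

static int demo_expansions(int crm, int opc1, int opc2)
{
    int ncrm  = (crm  == DEMO_CP_ANY) ? 16 : 1;   /* crm ranges 0..15 */
    int nopc1 = (opc1 == DEMO_CP_ANY) ?  8 : 1;   /* opc1 ranges 0..7 */
    int nopc2 = (opc2 == DEMO_CP_ANY) ?  8 : 1;   /* opc2 ranges 0..7 */
    return ncrm * nopc1 * nopc2;
}

int main(void)
{
    printf("fully wildcarded: %d entries\n",
           demo_expansions(DEMO_CP_ANY, DEMO_CP_ANY, DEMO_CP_ANY)); /* 1024 */
    printf("fixed encoding:   %d entry\n", demo_expansions(0, 0, 0)); /* 1 */
    return 0;
}
#endif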
5617
5618void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
5619                                    const ARMCPRegInfo *regs, void *opaque)
5620{
5621    /* Define a whole list of registers */
5622    const ARMCPRegInfo *r;
5623    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
5624        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
5625    }
5626}
5627
5628const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
5629{
5630    return g_hash_table_lookup(cpregs, &encoded_cp);
5631}
5632
5633void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
5634                         uint64_t value)
5635{
5636    /* Helper coprocessor write function for write-ignore registers */
5637}
5638
5639uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
5640{
5641    /* Helper coprocessor read function for read-as-zero registers */
5642    return 0;
5643}
5644
5645void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
5646{
5647    /* Helper coprocessor reset function for do-nothing-on-reset registers */
5648}
5649
5650static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
5651{
5652    /* Return true if it is not valid for us to switch to
5653     * this CPU mode (i.e. all the UNPREDICTABLE cases in
5654     * the ARM ARM CPSRWriteByInstr pseudocode).
5655     */
5656
5657    /* Changes to or from Hyp via MSR and CPS are illegal. */
5658    if (write_type == CPSRWriteByInstr &&
5659        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
5660         mode == ARM_CPU_MODE_HYP)) {
5661        return 1;
5662    }
5663
5664    switch (mode) {
5665    case ARM_CPU_MODE_USR:
5666        return 0;
5667    case ARM_CPU_MODE_SYS:
5668    case ARM_CPU_MODE_SVC:
5669    case ARM_CPU_MODE_ABT:
5670    case ARM_CPU_MODE_UND:
5671    case ARM_CPU_MODE_IRQ:
5672    case ARM_CPU_MODE_FIQ:
5673        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
5674         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
5675         */
5676        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
5677         * and CPS are treated as illegal mode changes.
5678         */
5679        if (write_type == CPSRWriteByInstr &&
5680            (env->cp15.hcr_el2 & HCR_TGE) &&
5681            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
5682            !arm_is_secure_below_el3(env)) {
5683            return 1;
5684        }
5685        return 0;
5686    case ARM_CPU_MODE_HYP:
5687        return !arm_feature(env, ARM_FEATURE_EL2)
5688            || arm_current_el(env) < 2 || arm_is_secure(env);
5689    case ARM_CPU_MODE_MON:
5690        return arm_current_el(env) < 3;
5691    default:
5692        return 1;
5693    }
5694}
5695
5696uint32_t cpsr_read(CPUARMState *env)
5697{
5698    int ZF;
5699    ZF = (env->ZF == 0);
5700    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
5701        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
5702        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
5703        | ((env->condexec_bits & 0xfc) << 8)
5704        | (env->GE << 16) | (env->daif & CPSR_AIF);
5705}
5706
5707void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
5708                CPSRWriteType write_type)
5709{
5710    uint32_t changed_daif;
5711
5712    if (mask & CPSR_NZCV) {
5713        env->ZF = (~val) & CPSR_Z;
5714        env->NF = val;
5715        env->CF = (val >> 29) & 1;
5716        env->VF = (val << 3) & 0x80000000;
5717    }
5718    if (mask & CPSR_Q)
5719        env->QF = ((val & CPSR_Q) != 0);
5720    if (mask & CPSR_T)
5721        env->thumb = ((val & CPSR_T) != 0);
5722    if (mask & CPSR_IT_0_1) {
5723        env->condexec_bits &= ~3;
5724        env->condexec_bits |= (val >> 25) & 3;
5725    }
5726    if (mask & CPSR_IT_2_7) {
5727        env->condexec_bits &= 3;
5728        env->condexec_bits |= (val >> 8) & 0xfc;
5729    }
5730    if (mask & CPSR_GE) {
5731        env->GE = (val >> 16) & 0xf;
5732    }
5733
5734    /* In a V7 implementation that includes the security extensions but does
5735     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
5736     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
5737     * bits respectively.
5738     *
5739     * In a V8 implementation, it is permitted for privileged software to
5740     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
5741     */
5742    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
5743        arm_feature(env, ARM_FEATURE_EL3) &&
5744        !arm_feature(env, ARM_FEATURE_EL2) &&
5745        !arm_is_secure(env)) {
5746
5747        changed_daif = (env->daif ^ val) & mask;
5748
5749        if (changed_daif & CPSR_A) {
5750            /* Check to see if we are allowed to change the masking of async
5751             * abort exceptions from a non-secure state.
5752             */
5753            if (!(env->cp15.scr_el3 & SCR_AW)) {
5754                qemu_log_mask(LOG_GUEST_ERROR,
5755                              "Ignoring attempt to switch CPSR_A flag from "
5756                              "non-secure world with SCR.AW bit clear\n");
5757                mask &= ~CPSR_A;
5758            }
5759        }
5760
5761        if (changed_daif & CPSR_F) {
5762            /* Check to see if we are allowed to change the masking of FIQ
5763             * exceptions from a non-secure state.
5764             */
5765            if (!(env->cp15.scr_el3 & SCR_FW)) {
5766                qemu_log_mask(LOG_GUEST_ERROR,
5767                              "Ignoring attempt to switch CPSR_F flag from "
5768                              "non-secure world with SCR.FW bit clear\n");
5769                mask &= ~CPSR_F;
5770            }
5771
5772            /* Check whether non-maskable FIQ (NMFI) support is enabled.
5773             * If this bit is set software is not allowed to mask
5774             * FIQs, but is allowed to set CPSR_F to 0.
5775             */
5776            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
5777                (val & CPSR_F)) {
5778                qemu_log_mask(LOG_GUEST_ERROR,
5779                              "Ignoring attempt to enable CPSR_F flag "
5780                              "(non-maskable FIQ [NMFI] support enabled)\n");
5781                mask &= ~CPSR_F;
5782            }
5783        }
5784    }
5785
5786    env->daif &= ~(CPSR_AIF & mask);
5787    env->daif |= val & CPSR_AIF & mask;
5788
5789    if (write_type != CPSRWriteRaw &&
5790        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
5791        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
5792            /* Note that we can only get here in USR mode if this is a
5793             * gdb stub write; for this case we follow the architectural
5794             * behaviour for guest writes in USR mode of ignoring an attempt
5795             * to switch mode. (Those are caught by translate.c for writes
5796             * triggered by guest instructions.)
5797             */
5798            mask &= ~CPSR_M;
5799        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
5800            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
5801             * v7, and has defined behaviour in v8:
5802             *  + leave CPSR.M untouched
5803             *  + allow changes to the other CPSR fields
5804             *  + set PSTATE.IL
5805             * For user changes via the GDB stub, we don't set PSTATE.IL,
5806             * as this would be unnecessarily harsh for a user error.
5807             */
5808            mask &= ~CPSR_M;
5809            if (write_type != CPSRWriteByGDBStub &&
5810                arm_feature(env, ARM_FEATURE_V8)) {
5811                mask |= CPSR_IL;
5812                val |= CPSR_IL;
5813            }
5814        } else {
5815            switch_mode(env, val & CPSR_M);
5816        }
5817    }
5818    mask &= ~CACHED_CPSR_BITS;
5819    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
5820}
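
/* Illustrative sketch (not part of QEMU): cpsr_read()/cpsr_write() above
 * cache the NZCV flags in separate fields -- NF holds N in bit 31, ZF is
 * zero iff Z is set, CF is 0/1, and VF holds V in bit 31. A minimal
 * round-trip of just those four bits using the same conventions; guarded
 * out of the build, compiles standalone.
 */
#if 0
#include <assert.h>
#include <stdint.h>

struct demo_flags { uint32_t NF, ZF, CF, VF; };

static void demo_write_nzcv(struct demo_flags *f, uint32_t val)
{
    f->ZF = (~val) & (1u << 30);      /* ZF == 0 means Z flag set */
    f->NF = val;                      /* N lives in bit 31 of NF */
    f->CF = (val >> 29) & 1;
    f->VF = (val << 3) & 0x80000000;  /* V moved up to bit 31 of VF */
}

static uint32_t demo_read_nzcv(const struct demo_flags *f)
{
    uint32_t zf = (f->ZF == 0);
    return (f->NF & 0x80000000) | (zf << 30) | (f->CF << 29) |
           ((f->VF & 0x80000000) >> 3);
}

int main(void)
{
    struct demo_flags f;
    uint32_t nzcv = (1u << 31) | (1u << 29);  /* N and C set, Z and V clear */
    demo_write_nzcv(&f, nzcv);
    assert(demo_read_nzcv(&f) == nzcv);       /* round-trips exactly */
    return 0;
}
#endif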
5821
5822/* Sign/zero extend */
5823uint32_t HELPER(sxtb16)(uint32_t x)
5824{
5825    uint32_t res;
5826    res = (uint16_t)(int8_t)x;
5827    res |= (uint32_t)(int8_t)(x >> 16) << 16;
5828    return res;
5829}
5830
5831uint32_t HELPER(uxtb16)(uint32_t x)
5832{
5833    uint32_t res;
5834    res = (uint16_t)(uint8_t)x;
5835    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
5836    return res;
5837}
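
/* Illustrative sketch (not part of QEMU): worked example of the two helpers
 * above. sxtb16 sign-extends bytes 0 and 2 of the input into the two result
 * halfwords; uxtb16 zero-extends them. Guarded out of the build.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t x = 0x00800080;            /* bytes 0 and 2 are 0x80 (-128)  */
    uint32_t s, u;

    s = (uint16_t)(int8_t)x;                       /* 0x0000ff80          */
    s |= (uint32_t)(int8_t)(x >> 16) << 16;        /* 0xff80ff80          */
    assert(s == 0xff80ff80);                       /* sign-extended       */

    u = (uint16_t)(uint8_t)x;                      /* 0x00000080          */
    u |= (uint32_t)(uint8_t)(x >> 16) << 16;       /* 0x00800080          */
    assert(u == 0x00800080);                       /* zero-extended       */
    return 0;
}
#endif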
5838
5839int32_t HELPER(sdiv)(int32_t num, int32_t den)
5840{
5841    if (den == 0)
5842      return 0;
5843    if (num == INT_MIN && den == -1)
5844      return INT_MIN;
5845    return num / den;
5846}
5847
5848uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
5849{
5850    if (den == 0)
5851      return 0;
5852    return num / den;
5853}
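
/* Illustrative sketch (not part of QEMU): the ARM SDIV/UDIV edge cases
 * encoded in the helpers above -- division by zero yields 0 rather than
 * trapping, and the one signed overflow case INT_MIN / -1 produces INT_MIN
 * instead of being undefined behaviour as it would be in plain C.
 * Guarded out of the build.
 */
#if 0
#include <assert.h>
#include <limits.h>
#include <stdint.h>

static int32_t demo_sdiv(int32_t num, int32_t den)
{
    if (den == 0) {
        return 0;                  /* ARM: divide by zero gives 0 */
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;            /* would overflow; ARM wraps to INT_MIN */
    }
    return num / den;
}

int main(void)
{
    assert(demo_sdiv(7, 0) == 0);
    assert(demo_sdiv(INT_MIN, -1) == INT_MIN);
    assert(demo_sdiv(-7, 2) == -3);    /* C99/ARM: truncate toward zero */
    return 0;
}
#endif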
5854
5855uint32_t HELPER(rbit)(uint32_t x)
5856{
5857    return revbit32(x);
5858}
5859
5860#if defined(CONFIG_USER_ONLY)
5861
5862/* These should probably raise undefined insn exceptions.  */
5863void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
5864{
5865    ARMCPU *cpu = arm_env_get_cpu(env);
5866
5867    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
5868}
5869
5870uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
5871{
5872    ARMCPU *cpu = arm_env_get_cpu(env);
5873
5874    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
5875    return 0;
5876}
5877
5878void switch_mode(CPUARMState *env, int mode)
5879{
5880    ARMCPU *cpu = arm_env_get_cpu(env);
5881
5882    if (mode != ARM_CPU_MODE_USR) {
5883        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
5884    }
5885}
5886
5887uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
5888                                 uint32_t cur_el, bool secure)
5889{
5890    return 1;
5891}
5892
5893void aarch64_sync_64_to_32(CPUARMState *env)
5894{
5895    g_assert_not_reached();
5896}
5897
5898#else
5899
5900void switch_mode(CPUARMState *env, int mode)
5901{
5902    int old_mode;
5903    int i;
5904
5905    old_mode = env->uncached_cpsr & CPSR_M;
5906    if (mode == old_mode)
5907        return;
5908
5909    if (old_mode == ARM_CPU_MODE_FIQ) {
5910        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
5911        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
5912    } else if (mode == ARM_CPU_MODE_FIQ) {
5913        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
5914        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
5915    }
5916
5917    i = bank_number(old_mode);
5918    env->banked_r13[i] = env->regs[13];
5919    env->banked_r14[i] = env->regs[14];
5920    env->banked_spsr[i] = env->spsr;
5921
5922    i = bank_number(mode);
5923    env->regs[13] = env->banked_r13[i];
5924    env->regs[14] = env->banked_r14[i];
5925    env->spsr = env->banked_spsr[i];
5926}
5927
5928/* Physical Interrupt Target EL Lookup Table
5929 *
5930 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
5931 *
5932 * The multi-dimensional table below is used for looking up the target
5933 * exception level given numerous condition criteria.  Specifically, the
5934 * target EL is based on SCR and HCR routing controls as well as the
5935 * currently executing EL and secure state.
5936 *
5937 *    Dimensions:
5938 *    target_el_table[2][2][2][2][2][4]
5939 *                    |  |  |  |  |  +--- Current EL
5940 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
5941 *                    |  |  |  +--------- HCR mask override
5942 *                    |  |  +------------ SCR exec state control
5943 *                    |  +--------------- SCR mask override
5944 *                    +------------------ 32-bit(0)/64-bit(1) EL3
5945 *
5946 *    The table values are as such:
5947 *    0-3 = EL0-EL3
5948 *     -1 = Cannot occur
5949 *
5950 * The ARM ARM target EL table includes entries indicating that an "exception
5951 * is not taken".  The two cases where this is applicable are:
5952 *    1) An exception is taken from EL3 but the SCR does not have the exception
5953 *    routed to EL3.
5954 *    2) An exception is taken from EL2 but the HCR does not have the exception
5955 *    routed to EL2.
5956 * In these two cases, the table below contains a target of EL1.  This value is
5957 * returned as it is expected that the consumer of the table data will check
5958 * for "target EL >= current EL" to ensure the exception is not taken.
5959 *
5960 *            SCR     HCR
5961 *         64  EA     AMO                 From
5962 *        BIT IRQ     IMO      Non-secure         Secure
5963 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
5964 */
5965static const int8_t target_el_table[2][2][2][2][2][4] = {
5966    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
5967       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
5968      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
5969       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
5970     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
5971       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
5972      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
5973       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
5974    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
5975       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
5976      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
5977       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
5978     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
5979       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
5980      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
5981       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
5982};
5983
5984/*
5985 * Determine the target EL for physical exceptions
5986 */
5987uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
5988                                 uint32_t cur_el, bool secure)
5989{
5990    CPUARMState *env = cs->env_ptr;
5991    int rw;
5992    int scr;
5993    int hcr;
5994    int target_el;
5995    /* Is the highest EL AArch64? */
5996    int is64 = arm_feature(env, ARM_FEATURE_AARCH64);
5997
5998    if (arm_feature(env, ARM_FEATURE_EL3)) {
5999        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
6000    } else {
6001        /* Either EL2 is the highest EL (and so the EL2 register width
6002         * is given by is64); or there is no EL2 or EL3, in which case
6003         * the value of 'rw' does not affect the table lookup anyway.
6004         */
6005        rw = is64;
6006    }
6007
6008    switch (excp_idx) {
6009    case EXCP_IRQ:
6010        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
6011        hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
6012        break;
6013    case EXCP_FIQ:
6014        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
6015        hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
6016        break;
6017    default:
6018        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
6019        hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
6020        break;
6021    }
6022
6023    /* If HCR.TGE is set then HCR is treated as being 1 */
6024    hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);
6025
6026    /* Perform a table-lookup for the target EL given the current state */
6027    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
6028
6029    assert(target_el > 0);
6030
6031    return target_el;
6032}
6033
6034static void v7m_push(CPUARMState *env, uint32_t val)
6035{
6036    CPUState *cs = CPU(arm_env_get_cpu(env));
6037
6038    env->regs[13] -= 4;
6039    stl_phys(cs->as, env->regs[13], val);
6040}
6041
6042static uint32_t v7m_pop(CPUARMState *env)
6043{
6044    CPUState *cs = CPU(arm_env_get_cpu(env));
6045    uint32_t val;
6046
6047    val = ldl_phys(cs->as, env->regs[13]);
6048    env->regs[13] += 4;
6049    return val;
6050}
6051
6052/* Switch to V7M main or process stack pointer.  */
6053static void switch_v7m_sp(CPUARMState *env, bool new_spsel)
6054{
6055    uint32_t tmp;
6056    bool old_spsel = env->v7m.control & R_V7M_CONTROL_SPSEL_MASK;
6057
6058    if (old_spsel != new_spsel) {
6059        tmp = env->v7m.other_sp;
6060        env->v7m.other_sp = env->regs[13];
6061        env->regs[13] = tmp;
6062
6063        env->v7m.control = deposit32(env->v7m.control,
6064                                     R_V7M_CONTROL_SPSEL_SHIFT,
6065                                     R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
6066    }
6067}
6068
6069static uint32_t arm_v7m_load_vector(ARMCPU *cpu)
6070{
6071    CPUState *cs = CPU(cpu);
6072    CPUARMState *env = &cpu->env;
6073    MemTxResult result;
6074    hwaddr vec = env->v7m.vecbase + env->v7m.exception * 4;
6075    uint32_t addr;
6076
6077    addr = address_space_ldl(cs->as, vec,
6078                             MEMTXATTRS_UNSPECIFIED, &result);
6079    if (result != MEMTX_OK) {
6080        /* Architecturally this should cause a HardFault setting HFSR.VECTTBL,
6081         * which would then be immediately followed by our failing to load
6082         * the entry vector for that HardFault, which is a Lockup case.
6083         * Since we don't model Lockup, we just report this guest error
6084         * via cpu_abort().
6085         */
6086        cpu_abort(cs, "Failed to read from exception vector table "
6087                  "entry %08x\n", (unsigned)vec);
6088    }
6089    return addr;
6090}
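
/* Illustrative sketch (not part of QEMU): the vector fetch above reads one
 * 32-bit entry at vecbase + 4 * exception_number, so with the vector table
 * at address 0, exception 3 (HardFault) is fetched from 0x0c. Guarded out
 * of the build.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t demo_vector_addr(uint32_t vecbase, uint32_t exc)
{
    return vecbase + exc * 4;      /* one 32-bit entry per exception */
}

int main(void)
{
    assert(demo_vector_addr(0x00000000, 3) == 0x0c);        /* HardFault */
    assert(demo_vector_addr(0x00000000, 11) == 0x2c);       /* SVCall    */
    assert(demo_vector_addr(0x20000000, 15) == 0x2000003c); /* SysTick   */
    return 0;
}
#endif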
6091
6092static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr)
6093{
6094    /* Do the "take the exception" parts of exception entry,
6095     * but not the pushing of state to the stack. This is
6096     * similar to the pseudocode ExceptionTaken() function.
6097     */
6098    CPUARMState *env = &cpu->env;
6099    uint32_t addr;
6100
6101    armv7m_nvic_acknowledge_irq(env->nvic);
6102    switch_v7m_sp(env, 0);
6103    /* Clear IT bits */
6104    env->condexec_bits = 0;
6105    env->regs[14] = lr;
6106    addr = arm_v7m_load_vector(cpu);
6107    env->regs[15] = addr & 0xfffffffe;
6108    env->thumb = addr & 1;
6109}
6110
6111static void v7m_push_stack(ARMCPU *cpu)
6112{
6113    /* Do the "set up stack frame" part of exception entry,
6114     * similar to pseudocode PushStack().
6115     */
6116    CPUARMState *env = &cpu->env;
6117    uint32_t xpsr = xpsr_read(env);
6118
6119    /* Align stack pointer if the guest wants that */
6120    if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) {
6121        env->regs[13] -= 4;
6122        xpsr |= 0x200;
6123    }
6124    /* Switch to the handler mode.  */
6125    v7m_push(env, xpsr);
6126    v7m_push(env, env->regs[15]);
6127    v7m_push(env, env->regs[14]);
6128    v7m_push(env, env->regs[12]);
6129    v7m_push(env, env->regs[3]);
6130    v7m_push(env, env->regs[2]);
6131    v7m_push(env, env->regs[1]);
6132    v7m_push(env, env->regs[0]);
6133}
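
/* Illustrative sketch (not part of QEMU): the push order above (xPSR first,
 * r0 last, on a descending stack) produces the architectural v7-M exception
 * frame, lowest address first: r0, r1, r2, r3, r12, lr, pc, xPSR.
 * Guarded out of the build.
 */
#if 0
#include <stdio.h>

int main(void)
{
    /* Push order above: xPSR, pc, lr, r12, r3, r2, r1, r0 (descending). */
    const char *frame[] = { "r0", "r1", "r2", "r3", "r12", "lr", "pc", "xPSR" };
    for (int off = 0; off < 8; off++) {
        printf("[sp + %2d] = %s\n", off * 4, frame[off]); /* lowest first */
    }
    return 0;
}
#endif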
6134
6135static void do_v7m_exception_exit(ARMCPU *cpu)
6136{
6137    CPUARMState *env = &cpu->env;
6138    uint32_t type;
6139    uint32_t xpsr;
6140    bool ufault = false;
6141    bool return_to_sp_process = false;
6142    bool return_to_handler = false;
6143    bool rettobase = false;
6144
6145    /* We can only get here from an EXCP_EXCEPTION_EXIT, and
6146     * arm_v7m_do_unassigned_access() enforces the architectural rule
6147     * that jumps to magic addresses don't have magic behaviour unless
6148     * we're in Handler mode (compare pseudocode BXWritePC()).
6149     */
6150    assert(env->v7m.exception != 0);
6151
6152    /* In the spec pseudocode ExceptionReturn() is called directly
6153     * from BXWritePC() and gets the full target PC value including
6154     * bit zero. In QEMU's implementation we treat it as a normal
6155     * jump-to-register (which is then caught later on), and so split
6156     * the target value up between env->regs[15] and env->thumb in
6157     * gen_bx(). Reconstitute it.
6158     */
6159    type = env->regs[15];
6160    if (env->thumb) {
6161        type |= 1;
6162    }
6163
6164    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
6165                  " previous exception %d\n",
6166                  type, env->v7m.exception);
6167
6168    if (extract32(type, 5, 23) != extract32(-1, 5, 23)) {
6169        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
6170                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n", type);
6171    }
6172
6173    if (env->v7m.exception != ARMV7M_EXCP_NMI) {
6174        /* Auto-clear FAULTMASK on return from other than NMI */
6175        env->daif &= ~PSTATE_F;
6176    }
6177
6178    switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception)) {
6179    case -1:
6180        /* attempt to exit an exception that isn't active */
6181        ufault = true;
6182        break;
6183    case 0:
6184        /* still an irq active now */
6185        break;
6186    case 1:
6187        /* we returned to base exception level, no nesting.
6188         * (In the pseudocode this is written using "NestedActivation != 1"
6189         * where we have 'rettobase == false'.)
6190         */
6191        rettobase = true;
6192        break;
6193    default:
6194        g_assert_not_reached();
6195    }
6196
6197    switch (type & 0xf) {
6198    case 1: /* Return to Handler */
6199        return_to_handler = true;
6200        break;
6201    case 13: /* Return to Thread using Process stack */
6202        return_to_sp_process = true;
6203        /* fall through */
6204    case 9: /* Return to Thread using Main stack */
6205        if (!rettobase &&
6206            !(env->v7m.ccr & R_V7M_CCR_NONBASETHRDENA_MASK)) {
6207            ufault = true;
6208        }
6209        break;
6210    default:
6211        ufault = true;
6212    }
6213
6214    if (ufault) {
6215        /* Bad exception return: instead of popping the exception
6216         * stack, directly take a usage fault on the current stack.
6217         */
6218        env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
6219        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6220        v7m_exception_taken(cpu, type | 0xf0000000);
6221        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
6222                      "stackframe: failed exception return integrity check\n");
6223        return;
6224    }
6225
6226    /* Switch to the target stack.  */
6227    switch_v7m_sp(env, return_to_sp_process);
6228    /* Pop registers.  */
6229    env->regs[0] = v7m_pop(env);
6230    env->regs[1] = v7m_pop(env);
6231    env->regs[2] = v7m_pop(env);
6232    env->regs[3] = v7m_pop(env);
6233    env->regs[12] = v7m_pop(env);
6234    env->regs[14] = v7m_pop(env);
6235    env->regs[15] = v7m_pop(env);
6236    if (env->regs[15] & 1) {
6237        qemu_log_mask(LOG_GUEST_ERROR,
6238                      "M profile return from interrupt with misaligned "
6239                      "PC is UNPREDICTABLE\n");
6240        /* Actual hardware seems to ignore the lsbit, and there are several
6241         * RTOSes out there which incorrectly assume the r15 in the stack
6242         * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
6243         */
6244        env->regs[15] &= ~1U;
6245    }
6246    xpsr = v7m_pop(env);
6247    xpsr_write(env, xpsr, 0xfffffdff);
6248    /* Undo stack alignment.  */
6249    if (xpsr & 0x200)
6250        env->regs[13] |= 4;
6251
6252    /* The restored xPSR exception field will be zero if we're
6253     * resuming in Thread mode. If that doesn't match what the
6254     * exception return type specified then this is a UsageFault.
6255     */
6256    if (return_to_handler == (env->v7m.exception == 0)) {
6257        /* Take an INVPC UsageFault by pushing the stack again. */
6258        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6259        env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
6260        v7m_push_stack(cpu);
6261        v7m_exception_taken(cpu, type | 0xf0000000);
6262        qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
6263                      "failed exception return integrity check\n");
6264        return;
6265    }
6266
6267    /* Otherwise, we have a successful exception exit. */
6268    qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
6269}
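
/* Illustrative sketch (not part of QEMU): decoding the low nibble of the
 * magic EXC_RETURN value handled above. Only the three encodings accepted
 * by the switch above are valid; everything else takes the INVPC UsageFault
 * path. Guarded out of the build.
 */
#if 0
#include <stdio.h>

static const char *demo_exc_return(unsigned type)
{
    switch (type & 0xf) {
    case 1:  return "return to Handler mode, Main stack";
    case 9:  return "return to Thread mode, Main stack";
    case 13: return "return to Thread mode, Process stack";
    default: return "invalid -- UsageFault (INVPC)";
    }
}

int main(void)
{
    unsigned vals[] = { 0xfffffff1, 0xfffffff9, 0xfffffffd, 0xfffffff8 };
    for (int i = 0; i < 4; i++) {
        printf("%#010x: %s\n", vals[i], demo_exc_return(vals[i]));
    }
    return 0;
}
#endif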
6270
6271static void arm_log_exception(int idx)
6272{
6273    if (qemu_loglevel_mask(CPU_LOG_INT)) {
6274        const char *exc = NULL;
6275        static const char * const excnames[] = {
6276            [EXCP_UDEF] = "Undefined Instruction",
6277            [EXCP_SWI] = "SVC",
6278            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
6279            [EXCP_DATA_ABORT] = "Data Abort",
6280            [EXCP_IRQ] = "IRQ",
6281            [EXCP_FIQ] = "FIQ",
6282            [EXCP_BKPT] = "Breakpoint",
6283            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
6284            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
6285            [EXCP_HVC] = "Hypervisor Call",
6286            [EXCP_HYP_TRAP] = "Hypervisor Trap",
6287            [EXCP_SMC] = "Secure Monitor Call",
6288            [EXCP_VIRQ] = "Virtual IRQ",
6289            [EXCP_VFIQ] = "Virtual FIQ",
6290            [EXCP_SEMIHOST] = "Semihosting call",
6291            [EXCP_NOCP] = "v7M NOCP UsageFault",
6292            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
6293        };
6294
6295        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
6296            exc = excnames[idx];
6297        }
6298        if (!exc) {
6299            exc = "unknown";
6300        }
6301        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
6302    }
6303}
6304
6305void arm_v7m_cpu_do_interrupt(CPUState *cs)
6306{
6307    ARMCPU *cpu = ARM_CPU(cs);
6308    CPUARMState *env = &cpu->env;
6309    uint32_t lr;
6310
6311    arm_log_exception(cs->exception_index);
6312
6313    lr = 0xfffffff1;
6314    if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
6315        lr |= 4;
6316    }
6317    if (env->v7m.exception == 0)
6318        lr |= 8;
6319
6320    /* For exceptions we just mark as pending on the NVIC, and let that
6321       handle it.  */
6322    switch (cs->exception_index) {
6323    case EXCP_UDEF:
6324        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6325        env->v7m.cfsr |= R_V7M_CFSR_UNDEFINSTR_MASK;
6326        break;
6327    case EXCP_NOCP:
6328        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6329        env->v7m.cfsr |= R_V7M_CFSR_NOCP_MASK;
6330        break;
6331    case EXCP_INVSTATE:
6332        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
6333        env->v7m.cfsr |= R_V7M_CFSR_INVSTATE_MASK;
6334        break;
6335    case EXCP_SWI:
6336        /* The PC already points to the next instruction.  */
6337        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
6338        break;
6339    case EXCP_PREFETCH_ABORT:
6340    case EXCP_DATA_ABORT:
6341        /* Note that for M profile we don't have a guest facing FSR, but
6342         * the env->exception.fsr will be populated by the code that
6343         * raises the fault, in the A profile short-descriptor format.
6344         */
6345        switch (env->exception.fsr & 0xf) {
6346        case 0x8: /* External Abort */
6347            switch (cs->exception_index) {
6348            case EXCP_PREFETCH_ABORT:
6349                env->v7m.cfsr |= R_V7M_CFSR_IBUSERR_MASK;
6350                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
6351                break;
6352            case EXCP_DATA_ABORT:
6353                env->v7m.cfsr |=
6354                    (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
6355                env->v7m.bfar = env->exception.vaddress;
6356                qemu_log_mask(CPU_LOG_INT,
6357                              "...with CFSR.PRECISERR and BFAR 0x%x\n",
6358                              env->v7m.bfar);
6359                break;
6360            }
6361            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS);
6362            break;
6363        default:
6364            /* All other FSR values are either MPU faults or "can't happen
6365             * for M profile" cases.
6366             */
6367            switch (cs->exception_index) {
6368            case EXCP_PREFETCH_ABORT:
6369                env->v7m.cfsr |= R_V7M_CFSR_IACCVIOL_MASK;
6370                qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
6371                break;
6372            case EXCP_DATA_ABORT:
6373                env->v7m.cfsr |=
6374                    (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
6375                env->v7m.mmfar = env->exception.vaddress;
6376                qemu_log_mask(CPU_LOG_INT,
6377                              "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
6378                              env->v7m.mmfar);
6379                break;
6380            }
6381            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
6382            break;
6383        }
6384        break;
6385    case EXCP_BKPT:
6386        if (semihosting_enabled()) {
6387            int nr;
6388            nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
6389            if (nr == 0xab) {
6390                env->regs[15] += 2;
6391                qemu_log_mask(CPU_LOG_INT,
6392                              "...handling as semihosting call 0x%x\n",
6393                              env->regs[0]);
6394                env->regs[0] = do_arm_semihosting(env);
6395                return;
6396            }
6397        }
6398        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
6399        break;
6400    case EXCP_IRQ:
6401        break;
6402    case EXCP_EXCEPTION_EXIT:
6403        do_v7m_exception_exit(cpu);
6404        return;
6405    default:
6406        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6407        return; /* Never happens.  Keep compiler happy.  */
6408    }
6409
6410    v7m_push_stack(cpu);
6411    v7m_exception_taken(cpu, lr);
6412    qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
6413}
6414
6415/* Function used to synchronize QEMU's AArch64 register set with AArch32
6416 * register set.  This is necessary when switching between AArch32 and AArch64
6417 * execution state.
6418 */
6419void aarch64_sync_32_to_64(CPUARMState *env)
6420{
6421    int i;
6422    uint32_t mode = env->uncached_cpsr & CPSR_M;
6423
6424    /* We can blanket copy R[0:7] to X[0:7] */
6425    for (i = 0; i < 8; i++) {
6426        env->xregs[i] = env->regs[i];
6427    }
6428
6429    /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
6430     * Otherwise, they come from the banked user regs.
6431     */
6432    if (mode == ARM_CPU_MODE_FIQ) {
6433        for (i = 8; i < 13; i++) {
6434            env->xregs[i] = env->usr_regs[i - 8];
6435        }
6436    } else {
6437        for (i = 8; i < 13; i++) {
6438            env->xregs[i] = env->regs[i];
6439        }
6440    }
6441
6442    /* Registers x13-x23 are the various mode SP and FP registers. Registers
6443     * r13 and r14 are only copied if we are in that mode, otherwise we copy
6444     * from the mode banked register.
6445     */
6446    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
6447        env->xregs[13] = env->regs[13];
6448        env->xregs[14] = env->regs[14];
6449    } else {
6450        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
6451        /* HYP is an exception in that it is copied from r14 */
6452        if (mode == ARM_CPU_MODE_HYP) {
6453            env->xregs[14] = env->regs[14];
6454        } else {
6455            env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
6456        }
6457    }
6458
6459    if (mode == ARM_CPU_MODE_HYP) {
6460        env->xregs[15] = env->regs[13];
6461    } else {
6462        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
6463    }
6464
6465    if (mode == ARM_CPU_MODE_IRQ) {
6466        env->xregs[16] = env->regs[14];
6467        env->xregs[17] = env->regs[13];
6468    } else {
6469        env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
6470        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
6471    }
6472
6473    if (mode == ARM_CPU_MODE_SVC) {
6474        env->xregs[18] = env->regs[14];
6475        env->xregs[19] = env->regs[13];
6476    } else {
6477        env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
6478        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
6479    }
6480
6481    if (mode == ARM_CPU_MODE_ABT) {
6482        env->xregs[20] = env->regs[14];
6483        env->xregs[21] = env->regs[13];
6484    } else {
6485        env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
6486        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
6487    }
6488
6489    if (mode == ARM_CPU_MODE_UND) {
6490        env->xregs[22] = env->regs[14];
6491        env->xregs[23] = env->regs[13];
6492    } else {
6493        env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
6494        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
6495    }
6496
6497    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
6498     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
6499     * FIQ bank for r8-r14.
6500     */
6501    if (mode == ARM_CPU_MODE_FIQ) {
6502        for (i = 24; i < 31; i++) {
6503            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
6504        }
6505    } else {
6506        for (i = 24; i < 29; i++) {
6507            env->xregs[i] = env->fiq_regs[i - 24];
6508        }
6509        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
6510        env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
6511    }
6512
6513    env->pc = env->regs[15];
6514}
6515
6516/* Function used to synchronize QEMU's AArch32 register set with AArch64
6517 * register set.  This is necessary when switching between AArch32 and AArch64
6518 * execution state.
6519 */
6520void aarch64_sync_64_to_32(CPUARMState *env)
6521{
6522    int i;
6523    uint32_t mode = env->uncached_cpsr & CPSR_M;
6524
6525    /* We can blanket copy X[0:7] to R[0:7] */
6526    for (i = 0; i < 8; i++) {
6527        env->regs[i] = env->xregs[i];
6528    }
6529
6530    /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
6531     * Otherwise, we copy x8-x12 into the banked user regs.
6532     */
6533    if (mode == ARM_CPU_MODE_FIQ) {
6534        for (i = 8; i < 13; i++) {
6535            env->usr_regs[i - 8] = env->xregs[i];
6536        }
6537    } else {
6538        for (i = 8; i < 13; i++) {
6539            env->regs[i] = env->xregs[i];
6540        }
6541    }
6542
6543    /* Registers r13 & r14 depend on the current mode.
6544     * If we are in a given mode, we copy the corresponding x registers to r13
6545     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
6546     * for the mode.
6547     */
6548    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
6549        env->regs[13] = env->xregs[13];
6550        env->regs[14] = env->xregs[14];
6551    } else {
6552        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
6553
6554        /* HYP is an exception in that it does not have its own banked r14 but
6555         * shares the USR r14
6556         */
6557        if (mode == ARM_CPU_MODE_HYP) {
6558            env->regs[14] = env->xregs[14];
6559        } else {
6560            env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
6561        }
6562    }
6563
6564    if (mode == ARM_CPU_MODE_HYP) {
6565        env->regs[13] = env->xregs[15];
6566    } else {
6567        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
6568    }
6569
6570    if (mode == ARM_CPU_MODE_IRQ) {
6571        env->regs[14] = env->xregs[16];
6572        env->regs[13] = env->xregs[17];
6573    } else {
6574        env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
6575        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
6576    }
6577
6578    if (mode == ARM_CPU_MODE_SVC) {
6579        env->regs[14] = env->xregs[18];
6580        env->regs[13] = env->xregs[19];
6581    } else {
6582        env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
6583        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
6584    }
6585
6586    if (mode == ARM_CPU_MODE_ABT) {
6587        env->regs[14] = env->xregs[20];
6588        env->regs[13] = env->xregs[21];
6589    } else {
6590        env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
6591        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
6592    }
6593
6594    if (mode == ARM_CPU_MODE_UND) {
6595        env->regs[14] = env->xregs[22];
6596        env->regs[13] = env->xregs[23];
6597    } else {
6598        env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
6599        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
6600    }
6601
6602    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
6603     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
6604     * FIQ bank for r8-r14.
6605     */
6606    if (mode == ARM_CPU_MODE_FIQ) {
6607        for (i = 24; i < 31; i++) {
6608            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
6609        }
6610    } else {
6611        for (i = 24; i < 29; i++) {
6612            env->fiq_regs[i - 24] = env->xregs[i];
6613        }
6614        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
6615        env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
6616    }
6617
6618    env->regs[15] = env->pc;
6619}
6620
6621static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
6622{
6623    ARMCPU *cpu = ARM_CPU(cs);
6624    CPUARMState *env = &cpu->env;
6625    uint32_t addr;
6626    uint32_t mask;
6627    int new_mode;
6628    uint32_t offset;
6629    uint32_t moe;
6630
6631    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
6632    switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
6633    case EC_BREAKPOINT:
6634    case EC_BREAKPOINT_SAME_EL:
6635        moe = 1;
6636        break;
6637    case EC_WATCHPOINT:
6638    case EC_WATCHPOINT_SAME_EL:
6639        moe = 10;
6640        break;
6641    case EC_AA32_BKPT:
6642        moe = 3;
6643        break;
6644    case EC_VECTORCATCH:
6645        moe = 5;
6646        break;
6647    default:
6648        moe = 0;
6649        break;
6650    }
6651
6652    if (moe) {
6653        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
6654    }
6655
6656    /* TODO: Vectored interrupt controller.  */
6657    switch (cs->exception_index) {
6658    case EXCP_UDEF:
6659        new_mode = ARM_CPU_MODE_UND;
6660        addr = 0x04;
6661        mask = CPSR_I;
6662        if (env->thumb) {
6663            offset = 2;
6664        } else {
6665            offset = 4;
            }
6666        break;
6667    case EXCP_SWI:
6668        new_mode = ARM_CPU_MODE_SVC;
6669        addr = 0x08;
6670        mask = CPSR_I;
6671        /* The PC already points to the next instruction.  */
6672        offset = 0;
6673        break;
6674    case EXCP_BKPT:
6675        env->exception.fsr = 2;
6676        /* Fall through to prefetch abort.  */
6677    case EXCP_PREFETCH_ABORT:
6678        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
6679        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
6680        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
6681                      env->exception.fsr, (uint32_t)env->exception.vaddress);
6682        new_mode = ARM_CPU_MODE_ABT;
6683        addr = 0x0c;
6684        mask = CPSR_A | CPSR_I;
6685        offset = 4;
6686        break;
6687    case EXCP_DATA_ABORT:
6688        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
6689        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
6690        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
6691                      env->exception.fsr,
6692                      (uint32_t)env->exception.vaddress);
6693        new_mode = ARM_CPU_MODE_ABT;
6694        addr = 0x10;
6695        mask = CPSR_A | CPSR_I;
6696        offset = 8;
6697        break;
6698    case EXCP_IRQ:
6699        new_mode = ARM_CPU_MODE_IRQ;
6700        addr = 0x18;
6701        /* Disable IRQ and imprecise data aborts.  */
6702        mask = CPSR_A | CPSR_I;
6703        offset = 4;
6704        if (env->cp15.scr_el3 & SCR_IRQ) {
6705            /* IRQ routed to monitor mode */
6706            new_mode = ARM_CPU_MODE_MON;
6707            mask |= CPSR_F;
6708        }
6709        break;
6710    case EXCP_FIQ:
6711        new_mode = ARM_CPU_MODE_FIQ;
6712        addr = 0x1c;
6713        /* Disable FIQ, IRQ and imprecise data aborts.  */
6714        mask = CPSR_A | CPSR_I | CPSR_F;
6715        if (env->cp15.scr_el3 & SCR_FIQ) {
6716            /* FIQ routed to monitor mode */
6717            new_mode = ARM_CPU_MODE_MON;
6718        }
6719        offset = 4;
6720        break;
6721    case EXCP_VIRQ:
6722        new_mode = ARM_CPU_MODE_IRQ;
6723        addr = 0x18;
6724        /* Disable IRQ and imprecise data aborts.  */
6725        mask = CPSR_A | CPSR_I;
6726        offset = 4;
6727        break;
6728    case EXCP_VFIQ:
6729        new_mode = ARM_CPU_MODE_FIQ;
6730        addr = 0x1c;
6731        /* Disable FIQ, IRQ and imprecise data aborts.  */
6732        mask = CPSR_A | CPSR_I | CPSR_F;
6733        offset = 4;
6734        break;
6735    case EXCP_SMC:
6736        new_mode = ARM_CPU_MODE_MON;
6737        addr = 0x08;
6738        mask = CPSR_A | CPSR_I | CPSR_F;
6739        offset = 0;
6740        break;
6741    default:
6742        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6743        return; /* Never happens.  Keep compiler happy.  */
6744    }
6745
6746    if (new_mode == ARM_CPU_MODE_MON) {
6747        addr += env->cp15.mvbar;
6748    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
6749        /* High vectors. When enabled, base address cannot be remapped. */
6750        addr += 0xffff0000;
6751    } else {
6752        /* ARM v7 architectures provide a vector base address register to remap
6753         * the interrupt vector table.
6754         * This register is only honoured in non-monitor mode, and is banked.
6755         * Note: only bits 31:5 are valid.
6756         */
6757        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
6758    }
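        /* For example: an IRQ (addr == 0x18) taken with SCTLR.V set
         * vectors to 0xffff0018; with V clear and VBAR == 0x8000 it
         * vectors to 0x8018; and an IRQ routed to Monitor mode vectors
         * to MVBAR + 0x18.
         */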
6759
6760    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
6761        env->cp15.scr_el3 &= ~SCR_NS;
6762    }
6763
6764    switch_mode(env, new_mode);
6765    /* For exceptions taken to AArch32 we must clear the SS bit in both
6766     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
6767     */
6768    env->uncached_cpsr &= ~PSTATE_SS;
6769    env->spsr = cpsr_read(env);
6770    /* Clear IT bits.  */
6771    env->condexec_bits = 0;
6772    /* Switch to the new mode, and to the correct instruction set.  */
6773    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
6774    /* Set new mode endianness */
6775    env->uncached_cpsr &= ~CPSR_E;
6776    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
6777        env->uncached_cpsr |= CPSR_E;
6778    }
6779    env->daif |= mask;
6780    /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
6781     * and we should just guard the thumb mode on V4 */
6782    if (arm_feature(env, ARM_FEATURE_V4T)) {
6783        env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
6784    }
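        /* The new mode's banked LR gets the preferred return address:
         * the old PC plus the per-exception offset selected above.
         */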
6785    env->regs[14] = env->regs[15] + offset;
6786    env->regs[15] = addr;
6787}
6788
6789/* Handle exception entry to a target EL which is using AArch64 */
6790static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
6791{
6792    ARMCPU *cpu = ARM_CPU(cs);
6793    CPUARMState *env = &cpu->env;
6794    unsigned int new_el = env->exception.target_el;
6795    target_ulong addr = env->cp15.vbar_el[new_el];
6796    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
6797
6798    if (arm_current_el(env) < new_el) {
6799        /* Entry vector offset depends on whether the implemented EL
6800         * immediately lower than the target level is using AArch32 or AArch64
6801         */
6802        bool is_aa64;
6803
6804        switch (new_el) {
6805        case 3:
6806            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
6807            break;
6808        case 2:
6809            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
6810            break;
6811        case 1:
6812            is_aa64 = is_a64(env);
6813            break;
6814        default:
6815            g_assert_not_reached();
6816        }
6817
6818        if (is_aa64) {
6819            addr += 0x400;
6820        } else {
6821            addr += 0x600;
6822        }
6823    } else if (pstate_read(env) & PSTATE_SP) {
6824        addr += 0x200;
6825    }
6826
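        /* At this point addr is the base of the vector group for the
         * target EL: VBAR_ELx + 0x000 (current EL, SP0), +0x200 (current
         * EL, SPx), +0x400 (lower EL using AArch64) or +0x600 (lower EL
         * using AArch32). The switch below adds the per-exception offset:
         * +0x80 for IRQ, +0x100 for FIQ; synchronous exceptions use the
         * base slot.
         */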
6827    switch (cs->exception_index) {
6828    case EXCP_PREFETCH_ABORT:
6829    case EXCP_DATA_ABORT:
6830        env->cp15.far_el[new_el] = env->exception.vaddress;
6831        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
6832                      env->cp15.far_el[new_el]);
6833        /* fall through */
6834    case EXCP_BKPT:
6835    case EXCP_UDEF:
6836    case EXCP_SWI:
6837    case EXCP_HVC:
6838    case EXCP_HYP_TRAP:
6839    case EXCP_SMC:
6840        env->cp15.esr_el[new_el] = env->exception.syndrome;
6841        break;
6842    case EXCP_IRQ:
6843    case EXCP_VIRQ:
6844        addr += 0x80;
6845        break;
6846    case EXCP_FIQ:
6847    case EXCP_VFIQ:
6848        addr += 0x100;
6849        break;
6850    case EXCP_SEMIHOST:
6851        qemu_log_mask(CPU_LOG_INT,
6852                      "...handling as semihosting call 0x%" PRIx64 "\n",
6853                      env->xregs[0]);
6854        env->xregs[0] = do_arm_semihosting(env);
6855        return;
6856    default:
6857        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
6858    }
6859
6860    if (is_a64(env)) {
6861        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
6862        aarch64_save_sp(env, arm_current_el(env));
6863        env->elr_el[new_el] = env->pc;
6864    } else {
6865        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
6866        env->elr_el[new_el] = env->regs[15];
6867
6868        aarch64_sync_32_to_64(env);
6869
6870        env->condexec_bits = 0;
6871    }
6872    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
6873                  env->elr_el[new_el]);
6874
6875    pstate_write(env, PSTATE_DAIF | new_mode);
6876    env->aarch64 = 1;
6877    aarch64_restore_sp(env, new_el);
6878
6879    env->pc = addr;
6880
6881    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
6882                  new_el, env->pc, pstate_read(env));
6883}
6884
6885static inline bool check_for_semihosting(CPUState *cs)
6886{
6887    /* Check whether this exception is a semihosting call; if so
6888     * then handle it and return true; otherwise return false.
6889     */
6890    ARMCPU *cpu = ARM_CPU(cs);
6891    CPUARMState *env = &cpu->env;
6892
6893    if (is_a64(env)) {
6894        if (cs->exception_index == EXCP_SEMIHOST) {
6895            /* This is always the 64-bit semihosting exception.
6896             * The "is this usermode" and "is semihosting enabled"
6897             * checks have been done at translate time.
6898             */
6899            qemu_log_mask(CPU_LOG_INT,
6900                          "...handling as semihosting call 0x%" PRIx64 "\n",
6901                          env->xregs[0]);
6902            env->xregs[0] = do_arm_semihosting(env);
6903            return true;
6904        }
6905        return false;
6906    } else {
6907        uint32_t imm;
6908
6909        /* Only intercept calls from privileged modes, to provide some
6910         * semblance of security.
6911         */
6912        if (cs->exception_index != EXCP_SEMIHOST &&
6913            (!semihosting_enabled() ||
6914             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
6915            return false;
6916        }
6917
6918        switch (cs->exception_index) {
6919        case EXCP_SEMIHOST:
6920            /* This is always a semihosting call; the "is this usermode"
6921             * and "is semihosting enabled" checks have been done at
6922             * translate time.
6923             */
6924            break;
6925        case EXCP_SWI:
6926            /* Check for semihosting interrupt.  */
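                /* The immediates tested below (0xab for a Thumb SVC,
                 * 0x123456 for an A32 SVC) are the standard ARM
                 * semihosting call numbers.
                 */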
6927            if (env->thumb) {
6928                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
6929                    & 0xff;
6930                if (imm == 0xab) {
6931                    break;
6932                }
6933            } else {
6934                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
6935                    & 0xffffff;
6936                if (imm == 0x123456) {
6937                    break;
6938                }
6939            }
6940            return false;
6941        case EXCP_BKPT:
6942            /* See if this is a semihosting syscall.  */
6943            if (env->thumb) {
6944                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
6945                    & 0xff;
6946                if (imm == 0xab) {
6947                    env->regs[15] += 2;
6948                    break;
6949                }
6950            }
6951            return false;
6952        default:
6953            return false;
6954        }
6955
6956        qemu_log_mask(CPU_LOG_INT,
6957                      "...handling as semihosting call 0x%x\n",
6958                      env->regs[0]);
6959        env->regs[0] = do_arm_semihosting(env);
6960        return true;
6961    }
6962}
6963
6964/* Handle a CPU exception for A and R profile CPUs.
6965 * Do any appropriate logging, handle PSCI calls, and then hand off
6966 * to the AArch64-entry or AArch32-entry function depending on the
6967 * target exception level's register width.
6968 */
6969void arm_cpu_do_interrupt(CPUState *cs)
6970{
6971    ARMCPU *cpu = ARM_CPU(cs);
6972    CPUARMState *env = &cpu->env;
6973    unsigned int new_el = env->exception.target_el;
6974
6975    assert(!arm_feature(env, ARM_FEATURE_M));
6976
6977    arm_log_exception(cs->exception_index);
6978    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
6979                  new_el);
6980    if (qemu_loglevel_mask(CPU_LOG_INT)
6981        && !excp_is_internal(cs->exception_index)) {
6982        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
6983                      env->exception.syndrome >> ARM_EL_EC_SHIFT,
6984                      env->exception.syndrome);
6985    }
6986
6987    if (arm_is_psci_call(cpu, cs->exception_index)) {
6988        arm_handle_psci_call(cpu);
6989        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
6990        return;
6991    }
6992
6993    /* Semihosting semantics depend on the register width of the
6994     * code that caused the exception, not the target exception level,
6995     * so must be handled here.
6996     */
6997    if (check_for_semihosting(cs)) {
6998        return;
6999    }
7000
7001    assert(!excp_is_internal(cs->exception_index));
7002    if (arm_el_is_aa64(env, new_el)) {
7003        arm_cpu_do_interrupt_aarch64(cs);
7004    } else {
7005        arm_cpu_do_interrupt_aarch32(cs);
7006    }
7007
7008    /* Hooks may change global state, so the BQL should be held; it
7009     * must also be held for any modification of
7010     * cs->interrupt_request.
7011     */
7012    g_assert(qemu_mutex_iothread_locked());
7013
7014    arm_call_el_change_hook(cpu);
7015
7016    if (!kvm_enabled()) {
7017        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
7018    }
7019}
7020
7021/* Return the exception level which controls this address translation regime */
7022static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
7023{
7024    switch (mmu_idx) {
7025    case ARMMMUIdx_S2NS:
7026    case ARMMMUIdx_S1E2:
7027        return 2;
7028    case ARMMMUIdx_S1E3:
7029        return 3;
7030    case ARMMMUIdx_S1SE0:
7031        return arm_el_is_aa64(env, 3) ? 1 : 3;
7032    case ARMMMUIdx_S1SE1:
7033    case ARMMMUIdx_S1NSE0:
7034    case ARMMMUIdx_S1NSE1:
7035    case ARMMMUIdx_MPriv:
7036    case ARMMMUIdx_MNegPri:
7037    case ARMMMUIdx_MUser:
7038        return 1;
7039    default:
7040        g_assert_not_reached();
7041    }
7042}
7043
7044/* Return true if this address translation regime is secure */
7045static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
7046{
7047    switch (mmu_idx) {
7048    case ARMMMUIdx_S12NSE0:
7049    case ARMMMUIdx_S12NSE1:
7050    case ARMMMUIdx_S1NSE0:
7051    case ARMMMUIdx_S1NSE1:
7052    case ARMMMUIdx_S1E2:
7053    case ARMMMUIdx_S2NS:
7054    case ARMMMUIdx_MPriv:
7055    case ARMMMUIdx_MNegPri:
7056    case ARMMMUIdx_MUser:
7057        return false;
7058    case ARMMMUIdx_S1E3:
7059    case ARMMMUIdx_S1SE0:
7060    case ARMMMUIdx_S1SE1:
7061        return true;
7062    default:
7063        g_assert_not_reached();
7064    }
7065}
7066
7067/* Return the SCTLR value which controls this address translation regime */
7068static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
7069{
7070    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
7071}
7072
7073/* Return true if the specified stage of address translation is disabled */
7074static inline bool regime_translation_disabled(CPUARMState *env,
7075                                               ARMMMUIdx mmu_idx)
7076{
7077    if (arm_feature(env, ARM_FEATURE_M)) {
7078        switch (env->v7m.mpu_ctrl &
7079                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
7080        case R_V7M_MPU_CTRL_ENABLE_MASK:
7081            /* Enabled, but not for HardFault and NMI */
7082            return mmu_idx == ARMMMUIdx_MNegPri;
7083        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
7084            /* Enabled for all cases */
7085            return false;
7086        case 0:
7087        default:
7088            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
7089             * we warned about that in armv7m_nvic.c when the guest set it.
7090             */
7091            return true;
7092        }
7093    }
7094
7095    if (mmu_idx == ARMMMUIdx_S2NS) {
7096        return (env->cp15.hcr_el2 & HCR_VM) == 0;
7097    }
7098    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
7099}
7100
7101static inline bool regime_translation_big_endian(CPUARMState *env,
7102                                                 ARMMMUIdx mmu_idx)
7103{
7104    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
7105}
7106
7107/* Return the TCR controlling this translation regime */
7108static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
7109{
7110    if (mmu_idx == ARMMMUIdx_S2NS) {
7111        return &env->cp15.vtcr_el2;
7112    }
7113    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
7114}
7115
7116/* Convert a possible stage1+2 MMU index into the appropriate
7117 * stage 1 MMU index
7118 */
7119static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
7120{
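        /* This relies on the layout of the ARMMMUIdx enumeration:
         * ARMMMUIdx_S1NSE0/1 sit at a fixed offset from
         * ARMMMUIdx_S12NSE0/1, so a single addend converts either index.
         */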
7121    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
7122        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
7123    }
7124    return mmu_idx;
7125}
7126
7127/* Returns TBI0 value for current regime el */
7128uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
7129{
7130    TCR *tcr;
7131    uint32_t el;
7132
7133    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
7134     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
7135     */
7136    mmu_idx = stage_1_mmu_idx(mmu_idx);
7137
7138    tcr = regime_tcr(env, mmu_idx);
7139    el = regime_el(env, mmu_idx);
7140
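        /* TCR_EL2 and TCR_EL3 have a single TBI bit at [20]; TCR_EL1 has
         * TBI0 at [37] (and TBI1 at [38], see arm_regime_tbi1 below).
         */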
7141    if (el > 1) {
7142        return extract64(tcr->raw_tcr, 20, 1);
7143    } else {
7144        return extract64(tcr->raw_tcr, 37, 1);
7145    }
7146}
7147
7148/* Returns TBI1 value for current regime el */
7149uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
7150{
7151    TCR *tcr;
7152    uint32_t el;
7153
7154    /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
7155     * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
7156     */
7157    mmu_idx = stage_1_mmu_idx(mmu_idx);
7158
7159    tcr = regime_tcr(env, mmu_idx);
7160    el = regime_el(env, mmu_idx);
7161
7162    if (el > 1) {
7163        return 0;
7164    } else {
7165        return extract64(tcr->raw_tcr, 38, 1);
7166    }
7167}
7168
7169/* Return the TTBR associated with this translation regime */
7170static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
7171                                   int ttbrn)
7172{
7173    if (mmu_idx == ARMMMUIdx_S2NS) {
7174        return env->cp15.vttbr_el2;
7175    }
7176    if (ttbrn == 0) {
7177        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
7178    } else {
7179        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
7180    }
7181}
7182
7183/* Return true if the translation regime is using LPAE format page tables */
7184static inline bool regime_using_lpae_format(CPUARMState *env,
7185                                            ARMMMUIdx mmu_idx)
7186{
7187    int el = regime_el(env, mmu_idx);
7188    if (el == 2 || arm_el_is_aa64(env, el)) {
7189        return true;
7190    }
7191    if (arm_feature(env, ARM_FEATURE_LPAE)
7192        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
7193        return true;
7194    }
7195    return false;
7196}
7197
7198/* Returns true if the stage 1 translation regime is using LPAE format page
7199 * tables. Used when raising alignment exceptions, whose FSR changes depending
7200 * on whether the long or short descriptor format is in use. */
7201bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
7202{
7203    mmu_idx = stage_1_mmu_idx(mmu_idx);
7204
7205    return regime_using_lpae_format(env, mmu_idx);
7206}
7207
7208static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
7209{
7210    switch (mmu_idx) {
7211    case ARMMMUIdx_S1SE0:
7212    case ARMMMUIdx_S1NSE0:
7213    case ARMMMUIdx_MUser:
7214        return true;
7215    default:
7216        return false;
7217    case ARMMMUIdx_S12NSE0:
7218    case ARMMMUIdx_S12NSE1:
7219        g_assert_not_reached();
7220    }
7221}
7222
7223/* Translate section/page access permissions to page
7224 * R/W protection flags
7225 *
7226 * @env:         CPUARMState
7227 * @mmu_idx:     MMU index indicating required translation regime
7228 * @ap:          The 3-bit access permissions (AP[2:0])
7229 * @domain_prot: The 2-bit domain access permissions
7230 */
7231static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
7232                                int ap, int domain_prot)
7233{
7234    bool is_user = regime_is_user(env, mmu_idx);
7235
7236    if (domain_prot == 3) {
7237        return PAGE_READ | PAGE_WRITE;
7238    }
7239
7240    switch (ap) {
7241    case 0:
7242        if (arm_feature(env, ARM_FEATURE_V7)) {
7243            return 0;
7244        }
7245        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
7246        case SCTLR_S:
7247            return is_user ? 0 : PAGE_READ;
7248        case SCTLR_R:
7249            return PAGE_READ;
7250        default:
7251            return 0;
7252        }
7253    case 1:
7254        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
7255    case 2:
7256        if (is_user) {
7257            return PAGE_READ;
7258        } else {
7259            return PAGE_READ | PAGE_WRITE;
7260        }
7261    case 3:
7262        return PAGE_READ | PAGE_WRITE;
7263    case 4: /* Reserved.  */
7264        return 0;
7265    case 5:
7266        return is_user ? 0 : PAGE_READ;
7267    case 6:
7268        return PAGE_READ;
7269    case 7:
7270        if (!arm_feature(env, ARM_FEATURE_V6K)) {
7271            return 0;
7272        }
7273        return PAGE_READ;
7274    default:
7275        g_assert_not_reached();
7276    }
7277}
7278
7279/* Translate section/page access permissions to page
7280 * R/W protection flags.
7281 *
7282 * @ap:      The 2-bit simple AP (AP[2:1])
7283 * @is_user: TRUE if accessing from PL0
7284 */
7285static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
7286{
7287    switch (ap) {
7288    case 0:
7289        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
7290    case 1:
7291        return PAGE_READ | PAGE_WRITE;
7292    case 2:
7293        return is_user ? 0 : PAGE_READ;
7294    case 3:
7295        return PAGE_READ;
7296    default:
7297        g_assert_not_reached();
7298    }
7299}
7300
7301static inline int
7302simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
7303{
7304    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
7305}
7306
7307/* Translate S2 section/page access permissions to protection flags
7308 *
7309 * @env:     CPUARMState
7310 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
7311 * @xn:      XN (execute-never) bit
7312 */
7313static int get_S2prot(CPUARMState *env, int s2ap, int xn)
7314{
7315    int prot = 0;
7316
7317    if (s2ap & 1) {
7318        prot |= PAGE_READ;
7319    }
7320    if (s2ap & 2) {
7321        prot |= PAGE_WRITE;
7322    }
7323    if (!xn) {
7324        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
7325            prot |= PAGE_EXEC;
7326        }
7327    }
7328    return prot;
7329}
7330
7331/* Translate section/page access permissions to protection flags
7332 *
7333 * @env:     CPUARMState
7334 * @mmu_idx: MMU index indicating required translation regime
7335 * @is_aa64: TRUE if AArch64
7336 * @ap:      The 2-bit simple AP (AP[2:1])
7337 * @ns:      NS (non-secure) bit
7338 * @xn:      XN (execute-never) bit
7339 * @pxn:     PXN (privileged execute-never) bit
7340 */
7341static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
7342                      int ap, int ns, int xn, int pxn)
7343{
7344    bool is_user = regime_is_user(env, mmu_idx);
7345    int prot_rw, user_rw;
7346    bool have_wxn;
7347    int wxn = 0;
7348
7349    assert(mmu_idx != ARMMMUIdx_S2NS);
7350
7351    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
7352    if (is_user) {
7353        prot_rw = user_rw;
7354    } else {
7355        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
7356    }
7357
7358    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
7359        return prot_rw;
7360    }
7361
7362    /* TODO have_wxn should be replaced with
7363     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
7364     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
7365     * compatible processors have EL2, which is required for [U]WXN.
7366     */
7367    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
7368
7369    if (have_wxn) {
7370        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
7371    }
7372
7373    if (is_aa64) {
7374        switch (regime_el(env, mmu_idx)) {
7375        case 1:
7376            if (!is_user) {
7377                xn = pxn || (user_rw & PAGE_WRITE);
7378            }
7379            break;
7380        case 2:
7381        case 3:
7382            break;
7383        }
7384    } else if (arm_feature(env, ARM_FEATURE_V7)) {
7385        switch (regime_el(env, mmu_idx)) {
7386        case 1:
7387        case 3:
7388            if (is_user) {
7389                xn = xn || !(user_rw & PAGE_READ);
7390            } else {
7391                int uwxn = 0;
7392                if (have_wxn) {
7393                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
7394                }
7395                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
7396                     (uwxn && (user_rw & PAGE_WRITE));
7397            }
7398            break;
7399        case 2:
7400            break;
7401        }
7402    } else {
7403        xn = wxn = 0;
7404    }
7405
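        /* WXN forbids execution from writable pages: if it applies and
         * the page is writable at this EL, never grant PAGE_EXEC. (The
         * UWXN case was already folded into xn above.)
         */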
7406    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
7407        return prot_rw;
7408    }
7409    return prot_rw | PAGE_EXEC;
7410}
7411
7412static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
7413                                     uint32_t *table, uint32_t address)
7414{
7415    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
7416    TCR *tcr = regime_tcr(env, mmu_idx);
7417
7418    if (address & tcr->mask) {
7419        if (tcr->raw_tcr & TTBCR_PD1) {
7420            /* Translation table walk disabled for TTBR1 */
7421            return false;
7422        }
7423        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
7424    } else {
7425        if (tcr->raw_tcr & TTBCR_PD0) {
7426            /* Translation table walk disabled for TTBR0 */
7427            return false;
7428        }
7429        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
7430    }
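        /* Index the level 1 table by VA[31:20]: the entries are 4 bytes
         * each, hence (address >> 18) & 0x3ffc.
         */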
7431    *table |= (address >> 18) & 0x3ffc;
7432    return true;
7433}
7434
7435/* Translate a S1 pagetable walk through S2 if needed.  */
7436static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
7437                               hwaddr addr, MemTxAttrs txattrs,
7438                               uint32_t *fsr,
7439                               ARMMMUFaultInfo *fi)
7440{
7441    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
7442        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
7443        target_ulong s2size;
7444        hwaddr s2pa;
7445        int s2prot;
7446        int ret;
7447
7448        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
7449                                 &txattrs, &s2prot, &s2size, fsr, fi);
7450        if (ret) {
7451            fi->s2addr = addr;
7452            fi->stage2 = true;
7453            fi->s1ptw = true;
7454            return ~0;
7455        }
7456        addr = s2pa;
7457    }
7458    return addr;
7459}
7460
7461/* All loads done in the course of a page table walk go through here.
7462 * TODO: rather than ignoring errors from physical memory reads (which
7463 * are external aborts in ARM terminology) we should propagate this
7464 * error out so that we can turn it into a Data Abort if this walk
7465 * was being done for a CPU load/store or an address translation instruction
7466 * (but not if it was for a debug access).
7467 */
7468static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
7469                            ARMMMUIdx mmu_idx, uint32_t *fsr,
7470                            ARMMMUFaultInfo *fi)
7471{
7472    ARMCPU *cpu = ARM_CPU(cs);
7473    CPUARMState *env = &cpu->env;
7474    MemTxAttrs attrs = {};
7475    AddressSpace *as;
7476
7477    attrs.secure = is_secure;
7478    as = arm_addressspace(cs, attrs);
7479    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
7480    if (fi->s1ptw) {
7481        return 0;
7482    }
7483    if (regime_translation_big_endian(env, mmu_idx)) {
7484        return address_space_ldl_be(as, addr, attrs, NULL);
7485    } else {
7486        return address_space_ldl_le(as, addr, attrs, NULL);
7487    }
7488}
7489
7490static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
7491                            ARMMMUIdx mmu_idx, uint32_t *fsr,
7492                            ARMMMUFaultInfo *fi)
7493{
7494    ARMCPU *cpu = ARM_CPU(cs);
7495    CPUARMState *env = &cpu->env;
7496    MemTxAttrs attrs = {};
7497    AddressSpace *as;
7498
7499    attrs.secure = is_secure;
7500    as = arm_addressspace(cs, attrs);
7501    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
7502    if (fi->s1ptw) {
7503        return 0;
7504    }
7505    if (regime_translation_big_endian(env, mmu_idx)) {
7506        return address_space_ldq_be(as, addr, attrs, NULL);
7507    } else {
7508        return address_space_ldq_le(as, addr, attrs, NULL);
7509    }
7510}
7511
7512static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
7513                             int access_type, ARMMMUIdx mmu_idx,
7514                             hwaddr *phys_ptr, int *prot,
7515                             target_ulong *page_size, uint32_t *fsr,
7516                             ARMMMUFaultInfo *fi)
7517{
7518    CPUState *cs = CPU(arm_env_get_cpu(env));
7519    int code;
7520    uint32_t table;
7521    uint32_t desc;
7522    int type;
7523    int ap;
7524    int domain = 0;
7525    int domain_prot;
7526    hwaddr phys_addr;
7527    uint32_t dacr;
7528
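        /* Short-descriptor FSR status codes used below: 5/7 translation
         * fault (section/page), 9/11 domain fault (section/page),
         * 13/15 permission fault (section/page).
         */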
7529    /* Pagetable walk.  */
7530    /* Lookup l1 descriptor.  */
7531    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
7532        /* Section translation fault if page walk is disabled by PD0 or PD1 */
7533        code = 5;
7534        goto do_fault;
7535    }
7536    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7537                       mmu_idx, fsr, fi);
7538    type = (desc & 3);
7539    domain = (desc >> 5) & 0x0f;
7540    if (regime_el(env, mmu_idx) == 1) {
7541        dacr = env->cp15.dacr_ns;
7542    } else {
7543        dacr = env->cp15.dacr_s;
7544    }
7545    domain_prot = (dacr >> (domain * 2)) & 3;
7546    if (type == 0) {
7547        /* Section translation fault.  */
7548        code = 5;
7549        goto do_fault;
7550    }
7551    if (domain_prot == 0 || domain_prot == 2) {
7552        if (type == 2) {
7553            code = 9; /* Section domain fault.  */
7554        } else {
7555            code = 11; /* Page domain fault.  */
            }
7556        goto do_fault;
7557    }
7558    if (type == 2) {
7559        /* 1MB section.  */
7560        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
7561        ap = (desc >> 10) & 3;
7562        code = 13;
7563        *page_size = 1024 * 1024;
7564    } else {
7565        /* Lookup l2 entry.  */
7566        if (type == 1) {
7567            /* Coarse pagetable.  */
7568            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
7569        } else {
7570            /* Fine pagetable.  */
7571            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
7572        }
7573        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7574                           mmu_idx, fsr, fi);
7575        switch (desc & 3) {
7576        case 0: /* Page translation fault.  */
7577            code = 7;
7578            goto do_fault;
7579        case 1: /* 64k page.  */
7580            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
7581            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
7582            *page_size = 0x10000;
7583            break;
7584        case 2: /* 4k page.  */
7585            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
7586            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
7587            *page_size = 0x1000;
7588            break;
7589        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
7590            if (type == 1) {
7591                /* ARMv6/XScale extended small page format */
7592                if (arm_feature(env, ARM_FEATURE_XSCALE)
7593                    || arm_feature(env, ARM_FEATURE_V6)) {
7594                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
7595                    *page_size = 0x1000;
7596                } else {
7597                    /* UNPREDICTABLE in ARMv5; we choose to take a
7598                     * page translation fault.
7599                     */
7600                    code = 7;
7601                    goto do_fault;
7602                }
7603            } else {
7604                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
7605                *page_size = 0x400;
7606            }
7607            ap = (desc >> 4) & 3;
7608            break;
7609        default:
7610            /* Never happens, but compiler isn't smart enough to tell.  */
7611            abort();
7612        }
7613        code = 15;
7614    }
7615    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
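        /* The v5 short descriptors have no XN bit, so any page that is
         * readable or writable is also executable.
         */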
7616    *prot |= *prot ? PAGE_EXEC : 0;
7617    if (!(*prot & (1 << access_type))) {
7618        /* Access permission fault.  */
7619        goto do_fault;
7620    }
7621    *phys_ptr = phys_addr;
7622    return false;
7623do_fault:
7624    *fsr = code | (domain << 4);
7625    return true;
7626}
7627
7628static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
7629                             int access_type, ARMMMUIdx mmu_idx,
7630                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
7631                             target_ulong *page_size, uint32_t *fsr,
7632                             ARMMMUFaultInfo *fi)
7633{
7634    CPUState *cs = CPU(arm_env_get_cpu(env));
7635    int code;
7636    uint32_t table;
7637    uint32_t desc;
7638    uint32_t xn;
7639    uint32_t pxn = 0;
7640    int type;
7641    int ap;
7642    int domain = 0;
7643    int domain_prot;
7644    hwaddr phys_addr;
7645    uint32_t dacr;
7646    bool ns;
7647
7648    /* Pagetable walk.  */
7649    /* Lookup l1 descriptor.  */
7650    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
7651        /* Section translation fault if page walk is disabled by PD0 or PD1 */
7652        code = 5;
7653        goto do_fault;
7654    }
7655    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7656                       mmu_idx, fsr, fi);
7657    type = (desc & 3);
7658    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
7659        /* Section translation fault, or attempt to use the encoding
7660         * which is Reserved on implementations without PXN.
7661         */
7662        code = 5;
7663        goto do_fault;
7664    }
7665    if ((type == 1) || !(desc & (1 << 18))) {
7666        /* Page or Section.  */
7667        domain = (desc >> 5) & 0x0f;
7668    }
7669    if (regime_el(env, mmu_idx) == 1) {
7670        dacr = env->cp15.dacr_ns;
7671    } else {
7672        dacr = env->cp15.dacr_s;
7673    }
7674    domain_prot = (dacr >> (domain * 2)) & 3;
7675    if (domain_prot == 0 || domain_prot == 2) {
7676        if (type != 1) {
7677            code = 9; /* Section domain fault.  */
7678        } else {
7679            code = 11; /* Page domain fault.  */
7680        }
7681        goto do_fault;
7682    }
7683    if (type != 1) {
7684        if (desc & (1 << 18)) {
7685            /* Supersection.  */
7686            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
7687            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
7688            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
7689            *page_size = 0x1000000;
7690        } else {
7691            /* Section.  */
7692            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
7693            *page_size = 0x100000;
7694        }
7695        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
7696        xn = desc & (1 << 4);
7697        pxn = desc & 1;
7698        code = 13;
7699        ns = extract32(desc, 19, 1);
7700    } else {
7701        if (arm_feature(env, ARM_FEATURE_PXN)) {
7702            pxn = (desc >> 2) & 1;
7703        }
7704        ns = extract32(desc, 3, 1);
7705        /* Lookup l2 entry.  */
7706        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
7707        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
7708                           mmu_idx, fsr, fi);
7709        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
7710        switch (desc & 3) {
7711        case 0: /* Page translation fault.  */
7712            code = 7;
7713            goto do_fault;
7714        case 1: /* 64k page.  */
7715            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
7716            xn = desc & (1 << 15);
7717            *page_size = 0x10000;
7718            break;
7719        case 2: case 3: /* 4k page.  */
7720            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
7721            xn = desc & 1;
7722            *page_size = 0x1000;
7723            break;
7724        default:
7725            /* Never happens, but compiler isn't smart enough to tell.  */
7726            abort();
7727        }
7728        code = 15;
7729    }
7730    if (domain_prot == 3) {
7731        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
7732    } else {
7733        if (pxn && !regime_is_user(env, mmu_idx)) {
7734            xn = 1;
7735        }
7736        if (xn && access_type == 2) {
7737            goto do_fault;
            }
7738
7739        if (arm_feature(env, ARM_FEATURE_V6K) &&
7740                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
7741            /* The simplified model uses AP[0] as an access control bit.  */
7742            if ((ap & 1) == 0) {
7743                /* Access flag fault.  */
7744                code = (code == 15) ? 6 : 3;
7745                goto do_fault;
7746            }
7747            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
7748        } else {
7749            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
7750        }
7751        if (*prot && !xn) {
7752            *prot |= PAGE_EXEC;
7753        }
7754        if (!(*prot & (1 << access_type))) {
7755            /* Access permission fault.  */
7756            goto do_fault;
7757        }
7758    }
7759    if (ns) {
7760        /* The NS bit will (as required by the architecture) have no effect if
7761         * the CPU doesn't support TZ or this is a non-secure translation
7762         * regime, because the attribute will already be non-secure.
7763         */
7764        attrs->secure = false;
7765    }
7766    *phys_ptr = phys_addr;
7767    return false;
7768do_fault:
7769    *fsr = code | (domain << 4);
7770    return true;
7771}
7772
7773/* Fault type for long-descriptor MMU fault reporting; this corresponds
7774 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
7775 */
7776typedef enum {
7777    translation_fault = 1,
7778    access_fault = 2,
7779    permission_fault = 3,
7780} MMUFaultType;
7781
7782/*
7783 * check_s2_mmu_setup
7784 * @cpu:        ARMCPU
7785 * @is_aa64:    True if the translation regime is in AArch64 state
7786 * @startlevel: Suggested starting level
7787 * @inputsize:  Bitsize of IPAs
7788 * @stride:     Page-table stride (See the ARM ARM)
7789 *
7790 * Returns true if the suggested S2 translation parameters are OK and
7791 * false otherwise.
7792 */
7793static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
7794                               int inputsize, int stride)
7795{
7796    const int grainsize = stride + 3;
7797    int startsizecheck;
7798
7799    /* Negative levels are never allowed.  */
7800    if (level < 0) {
7801        return false;
7802    }
7803
7804    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
7805    if (startsizecheck < 1 || startsizecheck > stride + 4) {
7806        return false;
7807    }
7808
7809    if (is_aa64) {
7810        CPUARMState *env = &cpu->env;
7811        unsigned int pamax = arm_pamax(cpu);
7812
7813        switch (stride) {
7814        case 13: /* 64KB Pages.  */
7815            if (level == 0 || (level == 1 && pamax <= 42)) {
7816                return false;
7817            }
7818            break;
7819        case 11: /* 16KB Pages.  */
7820            if (level == 0 || (level == 1 && pamax <= 40)) {
7821                return false;
7822            }
7823            break;
7824        case 9: /* 4KB Pages.  */
7825            if (level == 0 && pamax <= 42) {
7826                return false;
7827            }
7828            break;
7829        default:
7830            g_assert_not_reached();
7831        }
7832
7833        /* Inputsize checks.  */
7834        if (inputsize > pamax &&
7835            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
7836            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
7837            return false;
7838        }
7839    } else {
7840        /* AArch32 only supports 4KB pages. Assert on that.  */
7841        assert(stride == 9);
7842
7843        if (level == 0) {
7844            return false;
7845        }
7846    }
7847    return true;
7848}
7849
7850static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
7851                               int access_type, ARMMMUIdx mmu_idx,
7852                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
7853                               target_ulong *page_size_ptr, uint32_t *fsr,
7854                               ARMMMUFaultInfo *fi)
7855{
7856    ARMCPU *cpu = arm_env_get_cpu(env);
7857    CPUState *cs = CPU(cpu);
7858    /* Read an LPAE long-descriptor translation table. */
7859    MMUFaultType fault_type = translation_fault;
7860    uint32_t level;
7861    uint32_t epd = 0;
7862    int32_t t0sz, t1sz;
7863    uint32_t tg;
7864    uint64_t ttbr;
7865    int ttbr_select;
7866    hwaddr descaddr, indexmask, indexmask_grainsize;
7867    uint32_t tableattrs;
7868    target_ulong page_size;
7869    uint32_t attrs;
7870    int32_t stride = 9;
7871    int32_t addrsize;
7872    int inputsize;
7873    int32_t tbi = 0;
7874    TCR *tcr = regime_tcr(env, mmu_idx);
7875    int ap, ns, xn, pxn;
7876    uint32_t el = regime_el(env, mmu_idx);
7877    bool ttbr1_valid = true;
7878    uint64_t descaddrmask;
7879    bool aarch64 = arm_el_is_aa64(env, el);
7880
7881    /* TODO:
7882     * This code does not handle the different format TCR for VTCR_EL2.
7883     * This code also does not support shareability levels.
7884     * Attribute and permission bit handling should also be checked when adding
7885     * support for those page table walks.
7886     */
7887    if (aarch64) {
7888        level = 0;
7889        addrsize = 64;
7890        if (el > 1) {
7891            if (mmu_idx != ARMMMUIdx_S2NS) {
7892                tbi = extract64(tcr->raw_tcr, 20, 1);
7893            }
7894        } else {
7895            if (extract64(address, 55, 1)) {
7896                tbi = extract64(tcr->raw_tcr, 38, 1);
7897            } else {
7898                tbi = extract64(tcr->raw_tcr, 37, 1);
7899            }
7900        }
7901        tbi *= 8;
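            /* Scale the single TBI flag to the number of ignored top
             * address bits (0 or 8) for the region checks below.
             */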
7902
7903        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
7904         * invalid.
7905         */
7906        if (el > 1) {
7907            ttbr1_valid = false;
7908        }
7909    } else {
7910        level = 1;
7911        addrsize = 32;
7912        /* There is no TTBR1 for EL2 */
7913        if (el == 2) {
7914            ttbr1_valid = false;
7915        }
7916    }
7917
7918    /* Determine whether this address is in the region controlled by
7919     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
7920     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
7921     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
7922     */
7923    if (aarch64) {
7924        /* AArch64 translation.  */
7925        t0sz = extract32(tcr->raw_tcr, 0, 6);
7926        t0sz = MIN(t0sz, 39);
7927        t0sz = MAX(t0sz, 16);
7928    } else if (mmu_idx != ARMMMUIdx_S2NS) {
7929        /* AArch32 stage 1 translation.  */
7930        t0sz = extract32(tcr->raw_tcr, 0, 3);
7931    } else {
7932        /* AArch32 stage 2 translation.  */
7933        bool sext = extract32(tcr->raw_tcr, 4, 1);
7934        bool sign = extract32(tcr->raw_tcr, 3, 1);
7935        /* Address size is 40-bit for a stage 2 translation,
7936         * and t0sz can be negative (from -8 to 7),
7937         * so we need to adjust it to use the TTBR selecting logic below.
7938         */
7939        addrsize = 40;
7940        t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;
7941
7942        /* If the sign-extend bit is not the same as t0sz[3], the result
7943         * is unpredictable. Flag this as a guest error.  */
7944        if (sign != sext) {
7945            qemu_log_mask(LOG_GUEST_ERROR,
7946                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
7947        }
7948    }
7949    t1sz = extract32(tcr->raw_tcr, 16, 6);
7950    if (aarch64) {
7951        t1sz = MIN(t1sz, 39);
7952        t1sz = MAX(t1sz, 16);
7953    }
7954    if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
7955        /* there is a ttbr0 region and we are in it (high bits all zero) */
7956        ttbr_select = 0;
7957    } else if (ttbr1_valid && t1sz &&
7958               !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
7959        /* there is a ttbr1 region and we are in it (high bits all one) */
7960        ttbr_select = 1;
7961    } else if (!t0sz) {
7962        /* ttbr0 region is "everything not in the ttbr1 region" */
7963        ttbr_select = 0;
7964    } else if (!t1sz && ttbr1_valid) {
7965        /* ttbr1 region is "everything not in the ttbr0 region" */
7966        ttbr_select = 1;
7967    } else {
7968        /* in the gap between the two regions, this is a Translation fault */
7969        fault_type = translation_fault;
7970        goto do_fault;
7971    }
7972
7973    /* Note that QEMU ignores shareability and cacheability attributes,
7974     * so we don't need to do anything with the SH, ORGN, IRGN fields
7975     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
7976     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
7977     * implement any ASID-like capability so we can ignore it (instead
7978     * we will always flush the TLB any time the ASID is changed).
7979     */
7980    if (ttbr_select == 0) {
7981        ttbr = regime_ttbr(env, mmu_idx, 0);
7982        if (el < 2) {
7983            epd = extract32(tcr->raw_tcr, 7, 1);
7984        }
7985        inputsize = addrsize - t0sz;
7986
7987        tg = extract32(tcr->raw_tcr, 14, 2);
7988        if (tg == 1) { /* 64KB pages */
7989            stride = 13;
7990        }
7991        if (tg == 2) { /* 16KB pages */
7992            stride = 11;
7993        }
7994    } else {
7995        /* We should only be here if TTBR1 is valid */
7996        assert(ttbr1_valid);
7997
7998        ttbr = regime_ttbr(env, mmu_idx, 1);
7999        epd = extract32(tcr->raw_tcr, 23, 1);
8000        inputsize = addrsize - t1sz;
8001
8002        tg = extract32(tcr->raw_tcr, 30, 2);
8003        if (tg == 3)  { /* 64KB pages */
8004            stride = 13;
8005        }
8006        if (tg == 1) { /* 16KB pages */
8007            stride = 11;
8008        }
8009    }
8010
8011    /* Here we should have set up all the parameters for the translation:
8012     * inputsize, ttbr, epd, stride, tbi
8013     */
8014
8015    if (epd) {
8016        /* Translation table walk disabled => Translation fault on TLB miss
8017         * Note: This is always 0 on 64-bit EL2 and EL3.
8018         */
8019        goto do_fault;
8020    }
8021
8022    if (mmu_idx != ARMMMUIdx_S2NS) {
8023        /* The starting level depends on the virtual address size (which can
8024         * be up to 48 bits) and the translation granule size. It indicates
8025         * the number of strides (stride bits at a time) needed to
8026         * consume the bits of the input address. In the pseudocode this is:
8027         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
8028         * where their 'inputsize' is our 'inputsize', 'grainsize' is
8029         * our 'stride + 3' and 'stride' is our 'stride'.
8030         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
8031         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
8032         * = 4 - (inputsize - 4) / stride;
8033         */
8034        level = 4 - (inputsize - 4) / stride;
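            /* e.g. with 4KB pages (stride 9): inputsize 48 gives
             * level = 4 - 44 / 9 = 0; inputsize 39 gives
             * level = 4 - 35 / 9 = 1.
             */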
8035    } else {
8036        /* For stage 2 translations the starting level is specified by the
8037         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
8038         */
8039        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
8040        uint32_t startlevel;
8041        bool ok;
8042
8043        if (!aarch64 || stride == 9) {
8044            /* AArch32 or 4KB pages */
8045            startlevel = 2 - sl0;
8046        } else {
8047            /* 16KB or 64KB pages */
8048            startlevel = 3 - sl0;
8049        }
8050
8051        /* Check that the starting level is valid. */
8052        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
8053                                inputsize, stride);
8054        if (!ok) {
8055            fault_type = translation_fault;
8056            goto do_fault;
8057        }
8058        level = startlevel;
8059    }
8060
8061    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
8062    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
8063
8064    /* Now we can extract the actual base address from the TTBR */
8065    descaddr = extract64(ttbr, 0, 48);
8066    descaddr &= ~indexmask;
8067
8068    /* The address field in the descriptor goes up to bit 39 for ARMv7
8069     * but up to bit 47 for ARMv8. We use a mask up to bit 39 for
8070     * AArch32 because the higher bits are not needed to construct the
8071     * next descriptor address in that case (they should all be zeroes).
8072     */
8073    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
8074                   ~indexmask_grainsize;
8075
8076    /* Secure accesses start with the page table in secure memory and
8077     * can be downgraded to non-secure at any step. Non-secure accesses
8078     * remain non-secure. We implement this by just ORing in the NSTable/NS
8079     * bits at each step.
8080     */
8081    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
8082    for (;;) {
8083        uint64_t descriptor;
8084        bool nstable;
8085
8086        descaddr |= (address >> (stride * (4 - level))) & indexmask;
8087        descaddr &= ~7ULL;
8088        nstable = extract32(tableattrs, 4, 1);
8089        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fsr, fi);
8090        if (fi->s1ptw) {
8091            goto do_fault;
8092        }
8093
8094        if (!(descriptor & 1) ||
8095            (!(descriptor & 2) && (level == 3))) {
8096            /* Invalid, or the Reserved level 3 encoding */
8097            goto do_fault;
8098        }
8099        descaddr = descriptor & descaddrmask;
8100
8101        if ((descriptor & 2) && (level < 3)) {
8102            /* Table entry. The top five bits are attributes which may
8103             * propagate down through lower levels of the table (and
8104             * which are all arranged so that 0 means "no effect", so
8105             * we can gather them up by ORing in the bits at each level).
8106             */
8107            tableattrs |= extract64(descriptor, 59, 5);
8108            level++;
8109            indexmask = indexmask_grainsize;
8110            continue;
8111        }
8112        /* Block entry at level 1 or 2, or page entry at level 3.
8113         * These are basically the same thing, although the number
8114         * of bits we pull in from the vaddr varies.
8115         */
8116        page_size = (1ULL << ((stride * (4 - level)) + 3));
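            /* e.g. with 4KB granules (stride 9): level 3 is a 4KB page,
             * level 2 a 2MB block and level 1 a 1GB block.
             */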
8117        descaddr |= (address & (page_size - 1));
8118        /* Extract attributes from the descriptor */
8119        attrs = extract64(descriptor, 2, 10)
8120            | (extract64(descriptor, 52, 12) << 10);
8121
8122        if (mmu_idx == ARMMMUIdx_S2NS) {
8123            /* Stage 2 table descriptors do not include any attribute fields */
8124            break;
8125        }
8126        /* Merge in attributes from table descriptors */
8127        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
8128        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
8129        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
8130         * means "force PL1 access only", which means forcing AP[1] to 0.
8131         */
8132        if (extract32(tableattrs, 2, 1)) {
8133            attrs &= ~(1 << 4);
8134        }
8135        attrs |= nstable << 3; /* NS */
8136        break;
8137    }
8138    /* Here descaddr is the final physical address, and attributes
8139     * are all in attrs.
8140     */
8141    fault_type = access_fault;
8142    if ((attrs & (1 << 8)) == 0) {
8143        /* Access flag */
8144        goto do_fault;
8145    }
8146
8147    ap = extract32(attrs, 4, 2);
8148    xn = extract32(attrs, 12, 1);
8149
8150    if (mmu_idx == ARMMMUIdx_S2NS) {
8151        ns = true;
8152        *prot = get_S2prot(env, ap, xn);
8153    } else {
8154        ns = extract32(attrs, 3, 1);
8155        pxn = extract32(attrs, 11, 1);
8156        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
8157    }
8158
8159    fault_type = permission_fault;
8160    if (!(*prot & (1 << access_type))) {
8161        goto do_fault;
8162    }
8163
8164    if (ns) {
8165        /* The NS bit will (as required by the architecture) have no effect if
8166         * the CPU doesn't support TZ or this is a non-secure translation
8167         * regime, because the attribute will already be non-secure.
8168         */
8169        txattrs->secure = false;
8170    }
8171    *phys_ptr = descaddr;
8172    *page_size_ptr = page_size;
8173    return false;
8174
8175do_fault:
8176    /* Long-descriptor format IFSR/DFSR value */
8177    *fsr = (1 << 9) | (fault_type << 2) | level;
8178    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
8179    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
8180    return true;
8181}
8182
8183static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
8184                                                ARMMMUIdx mmu_idx,
8185                                                int32_t address, int *prot)
8186{
8187    if (!arm_feature(env, ARM_FEATURE_M)) {
8188        *prot = PAGE_READ | PAGE_WRITE;
8189        switch (address) {
8190        case 0xF0000000 ... 0xFFFFFFFF:
8191            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
8192                /* hivecs execing is ok */
8193                *prot |= PAGE_EXEC;
8194            }
8195            break;
8196        case 0x00000000 ... 0x7FFFFFFF:
8197            *prot |= PAGE_EXEC;
8198            break;
8199        }
8200    } else {
8201        /* Default system address map for M profile cores.
8202         * The architecture specifies which regions are execute-never;
8203         * at the MPU level no other checks are defined.
8204         */
8205        switch (address) {
8206        case 0x00000000 ... 0x1fffffff: /* ROM */
8207        case 0x20000000 ... 0x3fffffff: /* SRAM */
8208        case 0x60000000 ... 0x7fffffff: /* RAM */
8209        case 0x80000000 ... 0x9fffffff: /* RAM */
8210            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
8211            break;
8212        case 0x40000000 ... 0x5fffffff: /* Peripheral */
8213        case 0xa0000000 ... 0xbfffffff: /* Device */
8214        case 0xc0000000 ... 0xdfffffff: /* Device */
8215        case 0xe0000000 ... 0xffffffff: /* System */
8216            *prot = PAGE_READ | PAGE_WRITE;
8217            break;
8218        default:
8219            g_assert_not_reached();
8220        }
8221    }
8222}
8223
8224static bool pmsav7_use_background_region(ARMCPU *cpu,
8225                                         ARMMMUIdx mmu_idx, bool is_user)
8226{
8227    /* Return true if we should use the default memory map as a
8228     * "background" region if there are no hits against any MPU regions.
8229     */
8230    CPUARMState *env = &cpu->env;
8231
8232    if (is_user) {
8233        return false;
8234    }
8235
8236    if (arm_feature(env, ARM_FEATURE_M)) {
8237        return env->v7m.mpu_ctrl & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
8238    } else {
8239        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
8240    }
8241}
8242
8243static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
8244{
8245    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
8246    return arm_feature(env, ARM_FEATURE_M) &&
8247        extract32(address, 20, 12) == 0xe00;
8248}
8249
8250static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
8251{
8252    /* True if address is in the M profile system region
8253     * 0xe0000000 - 0xffffffff
8254     */
8255    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
8256}
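
/* Illustrative decoding of the two checks above: extract32(address, 20, 12)
 * takes bits [31:20], so the PPB test matches exactly
 * 0xe0000000..0xe00fffff (e.g. the NVIC at 0xe000e000 gives 0xe00);
 * extract32(address, 29, 3) takes bits [31:29], so the system-region test
 * matches 0xe0000000..0xffffffff (top three address bits all set).
 */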
8257
8258static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
8259                                 int access_type, ARMMMUIdx mmu_idx,
8260                                 hwaddr *phys_ptr, int *prot, uint32_t *fsr)
8261{
8262    ARMCPU *cpu = arm_env_get_cpu(env);
8263    int n;
8264    bool is_user = regime_is_user(env, mmu_idx);
8265
8266    *phys_ptr = address;
8267    *prot = 0;
8268
8269    if (regime_translation_disabled(env, mmu_idx) ||
8270        m_is_ppb_region(env, address)) {
8271        /* MPU disabled or M profile PPB access: use default memory map.
8272         * The other case which uses the default memory map in the
8273         * v7M ARM ARM pseudocode is exception vector reads from the vector
8274         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
8275         * which always does a direct read using address_space_ldl(), rather
8276         * than going via this function, so we don't need to check that here.
8277         */
8278        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
8279    } else { /* MPU enabled */
8280        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
8281            /* region search */
8282            uint32_t base = env->pmsav7.drbar[n];
8283            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
8284            uint32_t rmask;
8285            bool srdis = false;
8286
8287            if (!(env->pmsav7.drsr[n] & 0x1)) {
8288                continue;
8289            }
8290
8291            if (!rsize) {
8292                qemu_log_mask(LOG_GUEST_ERROR,
8293                              "DRSR[%d]: Rsize field cannot be 0\n", n);
8294                continue;
8295            }
8296            rsize++;
8297            rmask = (1ull << rsize) - 1;
8298
8299            if (base & rmask) {
8300                qemu_log_mask(LOG_GUEST_ERROR,
8301                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
8302                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
8303                              n, base, rmask);
8304                continue;
8305            }
8306
8307            if (address < base || address > base + rmask) {
8308                continue;
8309            }
8310
8311            /* Region matched */
8312
8313            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
8314                int i, snd;
8315                uint32_t srdis_mask;
8316
8317                rsize -= 3; /* sub region size (power of 2) */
8318                snd = ((address - base) >> rsize) & 0x7;
8319                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
8320
8321                srdis_mask = srdis ? 0x3 : 0x0;
8322                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
8323                    /* This will check in groups of 2, 4 and then 8, whether
8324                     * the subregion bits are consistent. rsize is incremented
8325                     * back up to give the region size, considering consistent
8326                     * adjacent subregions as one region. Stop testing if rsize
8327                     * is already big enough for an entire QEMU page.
8328                     */
8329                    int snd_rounded = snd & ~(i - 1);
8330                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
8331                                                     snd_rounded + 8, i);
8332                    if (srdis_mask ^ srdis_multi) {
8333                        break;
8334                    }
8335                    srdis_mask = (srdis_mask << i) | srdis_mask;
8336                    rsize++;
8337                }
8338            }
8339            if (rsize < TARGET_PAGE_BITS) {
8340                qemu_log_mask(LOG_UNIMP,
8341                              "DRSR[%d]: No support for MPU (sub)region "
8342                              "alignment of %" PRIu32 " bits. Minimum is %d\n",
8343                              n, rsize, TARGET_PAGE_BITS);
8344                continue;
8345            }
8346            if (srdis) {
8347                continue;
8348            }
8349            break;
8350        }
8351
8352        if (n == -1) { /* no hits */
8353            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
8354                /* background fault */
8355                *fsr = 0;
8356                return true;
8357            }
8358            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
8359        } else { /* an MPU hit! */
8360            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
8361            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
8362
8363            if (m_is_system_region(env, address)) {
8364                /* System space is always execute never */
8365                xn = 1;
8366            }
8367
8368            if (is_user) { /* User mode AP bit decoding */
8369                switch (ap) {
8370                case 0:
8371                case 1:
8372                case 5:
8373                    break; /* no access */
8374                case 3:
8375                    *prot |= PAGE_WRITE;
8376                    /* fall through */
8377                case 2:
8378                case 6:
8379                    *prot |= PAGE_READ | PAGE_EXEC;
8380                    break;
8381                default:
8382                    qemu_log_mask(LOG_GUEST_ERROR,
8383                                  "DRACR[%d]: Bad value for AP bits: 0x%"
8384                                  PRIx32 "\n", n, ap);
8385                }
8386            } else { /* Priv. mode AP bits decoding */
8387                switch (ap) {
8388                case 0:
8389                    break; /* no access */
8390                case 1:
8391                case 2:
8392                case 3:
8393                    *prot |= PAGE_WRITE;
8394                    /* fall through */
8395                case 5:
8396                case 6:
8397                    *prot |= PAGE_READ | PAGE_EXEC;
8398                    break;
8399                default:
8400                    qemu_log_mask(LOG_GUEST_ERROR,
8401                                  "DRACR[%d]: Bad value for AP bits: 0x%"
8402                                  PRIx32 "\n", n, ap);
8403                }
8404            }
8405
8406            /* execute never */
8407            if (xn) {
8408                *prot &= ~PAGE_EXEC;
8409            }
8410        }
8411    }
8412
8413    *fsr = 0x00d; /* Permission fault */
8414    return !(*prot & (1 << access_type));
8415}
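
/* Worked example for the subregion logic above (illustrative values):
 * a region with DRSR.Rsize = 14 spans 2^15 = 32KB and has eight 4KB
 * subregions. After "rsize -= 3" (rsize = 12), an access at base + 0x1234
 * selects snd = (0x1234 >> 12) & 7 = 1, i.e. SRD bit 1 (DRSR bit 9).
 * The merging loop then widens rsize back up while adjacent subregions
 * share the same disable state, so a fully consistent region reports its
 * true size instead of tripping the TARGET_PAGE_BITS LOG_UNIMP check.
 */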
8416
8417static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
8418                                 int access_type, ARMMMUIdx mmu_idx,
8419                                 hwaddr *phys_ptr, int *prot, uint32_t *fsr)
8420{
8421    int n;
8422    uint32_t mask;
8423    uint32_t base;
8424    bool is_user = regime_is_user(env, mmu_idx);
8425
8426    *phys_ptr = address;
8427    for (n = 7; n >= 0; n--) {
8428        base = env->cp15.c6_region[n];
8429        if ((base & 1) == 0) {
8430            continue;
8431        }
8432        mask = 1 << ((base >> 1) & 0x1f);
8433        /* Keep this shift separate from the above to avoid an
8434           (undefined) << 32.  */
8435        mask = (mask << 1) - 1;
8436        if (((base ^ address) & ~mask) == 0) {
8437            break;
8438        }
8439    }
8440    if (n < 0) {
8441        *fsr = 2;
8442        return true;
8443    }
8444
8445    if (access_type == 2) {
8446        mask = env->cp15.pmsav5_insn_ap;
8447    } else {
8448        mask = env->cp15.pmsav5_data_ap;
8449    }
8450    mask = (mask >> (n * 4)) & 0xf;
8451    switch (mask) {
8452    case 0:
8453        *fsr = 1;
8454        return true;
8455    case 1:
8456        if (is_user) {
8457            *fsr = 1;
8458            return true;
8459        }
8460        *prot = PAGE_READ | PAGE_WRITE;
8461        break;
8462    case 2:
8463        *prot = PAGE_READ;
8464        if (!is_user) {
8465            *prot |= PAGE_WRITE;
8466        }
8467        break;
8468    case 3:
8469        *prot = PAGE_READ | PAGE_WRITE;
8470        break;
8471    case 5:
8472        if (is_user) {
8473            *fsr = 1;
8474            return true;
8475        }
8476        *prot = PAGE_READ;
8477        break;
8478    case 6:
8479        *prot = PAGE_READ;
8480        break;
8481    default:
8482        /* Bad permission.  */
8483        *fsr = 1;
8484        return true;
8485    }
8486    *prot |= PAGE_EXEC;
8487    return false;
8488}
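
/* Illustrative decode of the PMSAv5 region registers used above: bit 0 of
 * c6_region[n] is the enable bit and bits [5:1] hold the size field S,
 * giving a region of 2^(S+1) bytes. For the minimum 4KB region, S = 11:
 *     mask = 1 << 11;    mask = (mask << 1) - 1;    => 0xfff
 * and the region matches when (base ^ address) has no bits set outside
 * that mask.
 */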
8489
8490/* get_phys_addr - get the physical address for this virtual address
8491 *
8492 * Find the physical address corresponding to the given virtual address,
8493 * by doing a translation table walk on MMU based systems or using the
8494 * MPU state on MPU based systems.
8495 *
8496 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
8497 * prot and page_size may not be filled in, and the populated fsr value provides
8498 * information on why the translation aborted, in the format of a
8499 * DFSR/IFSR fault register, with the following caveats:
8500 *  * we honour the short vs long DFSR format differences.
8501 *  * the WnR bit is never set (the caller must do this).
8502 *  * for PMSAv5 based systems we don't bother to return a full FSR format
8503 *    value.
8504 *
8505 * @env: CPUARMState
8506 * @address: virtual address to get physical address for
8507 * @access_type: 0 for read, 1 for write, 2 for execute
8508 * @mmu_idx: MMU index indicating required translation regime
8509 * @phys_ptr: set to the physical address corresponding to the virtual address
8510 * @attrs: set to the memory transaction attributes to use
8511 * @prot: set to the permissions for the page containing phys_ptr
8512 * @page_size: set to the size of the page containing phys_ptr
8513 * @fsr: set to the DFSR/IFSR value on failure
8514 */
8515static bool get_phys_addr(CPUARMState *env, target_ulong address,
8516                          int access_type, ARMMMUIdx mmu_idx,
8517                          hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
8518                          target_ulong *page_size, uint32_t *fsr,
8519                          ARMMMUFaultInfo *fi)
8520{
8521    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
8522        /* Call ourselves recursively to do the stage 1 and then stage 2
8523         * translations.
8524         */
8525        if (arm_feature(env, ARM_FEATURE_EL2)) {
8526            hwaddr ipa;
8527            int s2_prot;
8528            int ret;
8529
8530            ret = get_phys_addr(env, address, access_type,
8531                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
8532                                prot, page_size, fsr, fi);
8533
8534            /* If S1 fails or S2 is disabled, return early.  */
8535            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
8536                *phys_ptr = ipa;
8537                return ret;
8538            }
8539
8540            /* S1 is done. Now do S2 translation.  */
8541            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
8542                                     phys_ptr, attrs, &s2_prot,
8543                                     page_size, fsr, fi);
8544            fi->s2addr = ipa;
8545            /* Combine the S1 and S2 perms.  */
8546            *prot &= s2_prot;
8547            return ret;
8548        } else {
8549            /*
8550             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
8551             */
8552            mmu_idx = stage_1_mmu_idx(mmu_idx);
8553        }
8554    }
8555
8556    /* The page table entries may downgrade secure to non-secure, but
8557     * cannot upgrade a non-secure translation regime's attributes
8558     * to secure.
8559     */
8560    attrs->secure = regime_is_secure(env, mmu_idx);
8561    attrs->user = regime_is_user(env, mmu_idx);
8562
8563    /* Fast Context Switch Extension. This doesn't exist at all in v8.
8564     * In v7 and earlier it affects all stage 1 translations.
8565     */
8566    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
8567        && !arm_feature(env, ARM_FEATURE_V8)) {
8568        if (regime_el(env, mmu_idx) == 3) {
8569            address += env->cp15.fcseidr_s;
8570        } else {
8571            address += env->cp15.fcseidr_ns;
8572        }
8573    }
8574
8575    /* pmsav7 has special handling for when MPU is disabled so call it before
8576     * the common MMU/MPU disabled check below.
8577     */
8578    if (arm_feature(env, ARM_FEATURE_PMSA) &&
8579        arm_feature(env, ARM_FEATURE_V7)) {
8580        bool ret;
8581        *page_size = TARGET_PAGE_SIZE;
8582        ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
8583                                   phys_ptr, prot, fsr);
8584        qemu_log_mask(CPU_LOG_MMU, "PMSAv7 MPU lookup for %s at 0x%08" PRIx32
8585                      " mmu_idx %u -> %s (prot %c%c%c)\n",
8586                      access_type == MMU_DATA_LOAD ? "reading" :
8587                      (access_type == MMU_DATA_STORE ? "writing" : "executing"),
8588                      (uint32_t)address, mmu_idx,
8589                      ret ? "Miss" : "Hit",
8590                      *prot & PAGE_READ ? 'r' : '-',
8591                      *prot & PAGE_WRITE ? 'w' : '-',
8592                      *prot & PAGE_EXEC ? 'x' : '-');
8593
8594        return ret;
8595    }
8596
8597    if (regime_translation_disabled(env, mmu_idx)) {
8598        /* MMU/MPU disabled.  */
8599        *phys_ptr = address;
8600        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
8601        *page_size = TARGET_PAGE_SIZE;
8602        return false;
8603    }
8604
8605    if (arm_feature(env, ARM_FEATURE_PMSA)) {
8606        /* Pre-v7 MPU */
8607        *page_size = TARGET_PAGE_SIZE;
8608        return get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
8609                                    phys_ptr, prot, fsr);
8610    }
8611
8612    if (regime_using_lpae_format(env, mmu_idx)) {
8613        return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
8614                                  attrs, prot, page_size, fsr, fi);
8615    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
8616        return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
8617                                attrs, prot, page_size, fsr, fi);
8618    } else {
8619        return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
8620                                prot, page_size, fsr, fi);
8621    }
8622}
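
/* Example of the stage 1 + stage 2 composition performed above
 * (hypothetical addresses): if stage 1 maps VA 0x40001000 to IPA
 * 0x80001000 with prot rw-, and stage 2 maps that IPA page to PA
 * 0x120001000 with prot r-x, the guest access uses PA 0x120001000 with
 * the intersection prot r--, since "*prot &= s2_prot" keeps only the
 * permissions granted by both stages.
 */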
8623
8624/* Walk the page table and (if the mapping exists) add the page
8625 * to the TLB. Return false on success, or true on failure. Populate
8626 * fsr with ARM DFSR/IFSR fault register format value on failure.
8627 */
8628bool arm_tlb_fill(CPUState *cs, vaddr address,
8629                  int access_type, int mmu_idx, uint32_t *fsr,
8630                  ARMMMUFaultInfo *fi)
8631{
8632    ARMCPU *cpu = ARM_CPU(cs);
8633    CPUARMState *env = &cpu->env;
8634    hwaddr phys_addr;
8635    target_ulong page_size;
8636    int prot;
8637    int ret;
8638    MemTxAttrs attrs = {};
8639
8640    ret = get_phys_addr(env, address, access_type,
8641                        core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
8642                        &attrs, &prot, &page_size, fsr, fi);
8643    if (!ret) {
8644        /* Map a single [sub]page.  */
8645        phys_addr &= TARGET_PAGE_MASK;
8646        address &= TARGET_PAGE_MASK;
8647        tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
8648                                prot, mmu_idx, page_size);
8649        return false;
8650    }
8651
8652    return ret;
8653}
8654
8655hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
8656                                         MemTxAttrs *attrs)
8657{
8658    ARMCPU *cpu = ARM_CPU(cs);
8659    CPUARMState *env = &cpu->env;
8660    hwaddr phys_addr;
8661    target_ulong page_size;
8662    int prot;
8663    bool ret;
8664    uint32_t fsr;
8665    ARMMMUFaultInfo fi = {};
8666    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
8667
8668    *attrs = (MemTxAttrs) {};
8669
8670    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
8671                        attrs, &prot, &page_size, &fsr, &fi);
8672
8673    if (ret) {
8674        return -1;
8675    }
8676    return phys_addr;
8677}
8678
8679uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
8680{
8681    uint32_t mask;
8682    unsigned el = arm_current_el(env);
8683
8684    /* First handle registers which unprivileged can read */
8685
8686    switch (reg) {
8687    case 0 ... 7: /* xPSR sub-fields */
8688        mask = 0;
8689        if ((reg & 1) && el) {
8690            mask |= 0x000001ff; /* IPSR (unpriv. reads as zero) */
8691        }
8692        if (!(reg & 4)) {
8693            mask |= 0xf8000000; /* APSR */
8694        }
8695        /* EPSR reads as zero */
8696        return xpsr_read(env) & mask;
8698    case 20: /* CONTROL */
8699        return env->v7m.control;
8700    }
8701
8702    if (el == 0) {
8703        return 0; /* unprivileged reads others as zero */
8704    }
8705
8706    switch (reg) {
8707    case 8: /* MSP */
8708        return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
8709            env->v7m.other_sp : env->regs[13];
8710    case 9: /* PSP */
8711        return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
8712            env->regs[13] : env->v7m.other_sp;
8713    case 16: /* PRIMASK */
8714        return (env->daif & PSTATE_I) != 0;
8715    case 17: /* BASEPRI */
8716    case 18: /* BASEPRI_MAX */
8717        return env->v7m.basepri;
8718    case 19: /* FAULTMASK */
8719        return (env->daif & PSTATE_F) != 0;
8720    default:
8721        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
8722                                       " register %d\n", reg);
8723        return 0;
8724    }
8725}
8726
8727void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
8728{
8729    /* We're passed bits [11..0] of the instruction; extract
8730     * SYSm and the mask bits.
8731     * Invalid combinations of SYSm and mask are UNPREDICTABLE;
8732     * we choose to treat them as if the mask bits were valid.
8733     * NB that the pseudocode 'mask' variable is bits [11..10],
8734     * whereas ours is [11..8].
8735     */
8736    uint32_t mask = extract32(maskreg, 8, 4);
8737    uint32_t reg = extract32(maskreg, 0, 8);
8738
8739    if (arm_current_el(env) == 0 && reg > 7) {
8740        /* only xPSR sub-fields may be written by unprivileged */
8741        return;
8742    }
8743
8744    switch (reg) {
8745    case 0 ... 7: /* xPSR sub-fields */
8746        /* only APSR is actually writable */
8747        if (!(reg & 4)) {
8748            uint32_t apsrmask = 0;
8749
8750            if (mask & 8) {
8751                apsrmask |= 0xf8000000; /* APSR NZCVQ */
8752            }
8753            if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
8754                apsrmask |= 0x000f0000; /* APSR GE[3:0] */
8755            }
8756            xpsr_write(env, val, apsrmask);
8757        }
8758        break;
8759    case 8: /* MSP */
8760        if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
8761            env->v7m.other_sp = val;
8762        } else {
8763            env->regs[13] = val;
8764        }
8765        break;
8766    case 9: /* PSP */
8767        if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
8768            env->regs[13] = val;
8769        } else {
8770            env->v7m.other_sp = val;
8771        }
8772        break;
8773    case 16: /* PRIMASK */
8774        if (val & 1) {
8775            env->daif |= PSTATE_I;
8776        } else {
8777            env->daif &= ~PSTATE_I;
8778        }
8779        break;
8780    case 17: /* BASEPRI */
8781        env->v7m.basepri = val & 0xff;
8782        break;
8783    case 18: /* BASEPRI_MAX */
8784        val &= 0xff;
8785        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
8786            env->v7m.basepri = val;
8787        break;
8788    case 19: /* FAULTMASK */
8789        if (val & 1) {
8790            env->daif |= PSTATE_F;
8791        } else {
8792            env->daif &= ~PSTATE_F;
8793        }
8794        break;
8795    case 20: /* CONTROL */
8796        /* Writing to the SPSEL bit only has an effect if we are in
8797         * thread mode; other bits can be updated by any privileged code.
8798         * switch_v7m_sp() deals with updating the SPSEL bit in
8799         * env->v7m.control, so we only need update the others.
8800         */
8801        if (env->v7m.exception == 0) {
8802            switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
8803        }
8804        env->v7m.control &= ~R_V7M_CONTROL_NPRIV_MASK;
8805        env->v7m.control |= val & R_V7M_CONTROL_NPRIV_MASK;
8806        break;
8807    default:
8808        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
8809                                       " register %d\n", reg);
8810        return;
8811    }
8812}
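
/* Worked example of the maskreg decoding above (illustrative): an
 * "MSR APSR_nzcvq, r0" passes instruction bits [11:0] with SYSm = 0 and
 * bit 11 set, so mask = extract32(maskreg, 8, 4) = 0x8 and reg = 0.
 * The (mask & 8) test then selects apsrmask = 0xf8000000, writing only
 * the NZCVQ flags.
 */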
8813
8814#endif
8815
8816void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
8817{
8818    /* Implement DC ZVA, which zeroes a fixed-length block of memory.
8819     * Note that we do not implement the (architecturally mandated)
8820     * alignment fault for attempts to use this on Device memory
8821     * (which matches the usual QEMU behaviour of not implementing either
8822     * alignment faults or any memory attribute handling).
8823     */
8824
8825    ARMCPU *cpu = arm_env_get_cpu(env);
8826    uint64_t blocklen = 4 << cpu->dcz_blocksize;
8827    uint64_t vaddr = vaddr_in & ~(blocklen - 1);
8828
8829#ifndef CONFIG_USER_ONLY
8830    {
8831        /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
8832         * the block size so we might have to do more than one TLB lookup.
8833         * We know that in fact for any v8 CPU the page size is at least 4K
8834         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
8835         * 1K as an artefact of legacy v5 subpage support being present in the
8836         * same QEMU executable.
8837         */
8838        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
8839        void *hostaddr[maxidx];
8840        int try, i;
8841        unsigned mmu_idx = cpu_mmu_index(env, false);
8842        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
8843
8844        for (try = 0; try < 2; try++) {
8845
8846            for (i = 0; i < maxidx; i++) {
8847                hostaddr[i] = tlb_vaddr_to_host(env,
8848                                                vaddr + TARGET_PAGE_SIZE * i,
8849                                                1, mmu_idx);
8850                if (!hostaddr[i]) {
8851                    break;
8852                }
8853            }
8854            if (i == maxidx) {
8855                /* If it's all in the TLB it's fair game for just writing to;
8856                 * we know we don't need to update dirty status, etc.
8857                 */
8858                for (i = 0; i < maxidx - 1; i++) {
8859                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
8860                }
8861                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
8862                return;
8863            }
8864            /* OK, try a store and see if we can populate the tlb. This
8865             * might cause an exception if the memory isn't writable,
8866             * in which case we will longjmp out of here. We must for
8867             * this purpose use the actual register value passed to us
8868             * so that we get the fault address right.
8869             */
8870            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
8871            /* Now we can populate the other TLB entries, if any */
8872            for (i = 0; i < maxidx; i++) {
8873                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
8874                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
8875                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
8876                }
8877            }
8878        }
8879
8880        /* Slow path (probably attempt to do this to an I/O device or
8881         * similar, or clearing of a block of code we have translations
8882         * cached for). Just do a series of byte writes as the architecture
8883         * demands. It's not worth trying to use a cpu_physical_memory_map(),
8884         * memset(), unmap() sequence here because:
8885         *  + we'd need to account for the blocksize being larger than a page
8886         *  + the direct-RAM access case is almost always going to be dealt
8887         *    with in the fastpath code above, so there's no speed benefit
8888         *  + we would have to deal with the map returning NULL because the
8889         *    bounce buffer was in use
8890         */
8891        for (i = 0; i < blocklen; i++) {
8892            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
8893        }
8894    }
8895#else
8896    memset(g2h(vaddr), 0, blocklen);
8897#endif
8898}
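
/* Example of the block-size computation above: with the common
 * DCZID_EL0.BS value of 4 (cpu->dcz_blocksize == 4), blocklen is
 * 4 << 4 = 64 bytes, and vaddr_in is rounded down to a 64-byte boundary
 * before the zeroing.
 */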
8899
8900/* Note that signed overflow is undefined in C.  The following routines are
8901   careful to use unsigned types where modulo arithmetic is required.
8902   Failure to do so _will_ break on newer gcc.  */
8903
8904/* Signed saturating arithmetic.  */
8905
8906/* Perform 16-bit signed saturating addition.  */
8907static inline uint16_t add16_sat(uint16_t a, uint16_t b)
8908{
8909    uint16_t res;
8910
8911    res = a + b;
8912    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
8913        if (a & 0x8000)
8914            res = 0x8000;
8915        else
8916            res = 0x7fff;
8917    }
8918    return res;
8919}
8920
8921/* Perform 8-bit signed saturating addition.  */
8922static inline uint8_t add8_sat(uint8_t a, uint8_t b)
8923{
8924    uint8_t res;
8925
8926    res = a + b;
8927    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
8928        if (a & 0x80)
8929            res = 0x80;
8930        else
8931            res = 0x7f;
8932    }
8933    return res;
8934}
8935
8936/* Perform 16-bit signed saturating subtraction.  */
8937static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
8938{
8939    uint16_t res;
8940
8941    res = a - b;
8942    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
8943        if (a & 0x8000)
8944            res = 0x8000;
8945        else
8946            res = 0x7fff;
8947    }
8948    return res;
8949}
8950
8951/* Perform 8-bit signed saturating subtraction.  */
8952static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
8953{
8954    uint8_t res;
8955
8956    res = a - b;
8957    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
8958        if (a & 0x80)
8959            res = 0x80;
8960        else
8961            res = 0x7f;
8962    }
8963    return res;
8964}
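
/* Worked example of the saturation test above: add16_sat(0x7000, 0x2000)
 * computes res = 0x9000; (res ^ a) has the sign bit set while (a ^ b)
 * does not, so the operands agreed in sign but the result flipped sign:
 * positive overflow, and the result saturates to 0x7fff.
 */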
8965
8966#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
8967#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
8968#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
8969#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
8970#define PFX q
8971
8972#include "op_addsub.h"
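
/* The include above is a template: op_addsub.h instantiates packed
 * parallel add/subtract helpers from the ADD16/SUB16/ADD8/SUB8 and PFX
 * macros defined just before it, operating lane-wise on a 32-bit value
 * (two 16-bit or four 8-bit lanes). With PFX set to "q" it expands to
 * helpers along the lines of (a sketch, not the literal expansion):
 *
 *     uint32_t HELPER(qadd16)(uint32_t a, uint32_t b)
 *     {
 *         uint32_t res = 0;
 *         ADD16(a, b, 0);              // low halfword lane
 *         ADD16(a >> 16, b >> 16, 1);  // high halfword lane
 *         return res;
 *     }
 */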
8973
8974/* Unsigned saturating arithmetic.  */
8975static inline uint16_t add16_usat(uint16_t a, uint16_t b)
8976{
8977    uint16_t res;
8978    res = a + b;
8979    if (res < a)
8980        res = 0xffff;
8981    return res;
8982}
8983
8984static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
8985{
8986    if (a > b)
8987        return a - b;
8988    else
8989        return 0;
8990}
8991
8992static inline uint8_t add8_usat(uint8_t a, uint8_t b)
8993{
8994    uint8_t res;
8995    res = a + b;
8996    if (res < a)
8997        res = 0xff;
8998    return res;
8999}
9000
9001static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
9002{
9003    if (a > b)
9004        return a - b;
9005    else
9006        return 0;
9007}
9008
9009#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
9010#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
9011#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
9012#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
9013#define PFX uq
9014
9015#include "op_addsub.h"
9016
9017/* Signed modulo arithmetic.  */
9018#define SARITH16(a, b, n, op) do { \
9019    int32_t sum; \
9020    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
9021    RESULT(sum, n, 16); \
9022    if (sum >= 0) \
9023        ge |= 3 << (n * 2); \
9024    } while(0)
9025
9026#define SARITH8(a, b, n, op) do { \
9027    int32_t sum; \
9028    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
9029    RESULT(sum, n, 8); \
9030    if (sum >= 0) \
9031        ge |= 1 << n; \
9032    } while(0)
9033
9034
9035#define ADD16(a, b, n) SARITH16(a, b, n, +)
9036#define SUB16(a, b, n) SARITH16(a, b, n, -)
9037#define ADD8(a, b, n)  SARITH8(a, b, n, +)
9038#define SUB8(a, b, n)  SARITH8(a, b, n, -)
9039#define PFX s
9040#define ARITH_GE
9041
9042#include "op_addsub.h"
9043
9044/* Unsigned modulo arithmetic.  */
9045#define ADD16(a, b, n) do { \
9046    uint32_t sum; \
9047    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
9048    RESULT(sum, n, 16); \
9049    if ((sum >> 16) == 1) \
9050        ge |= 3 << (n * 2); \
9051    } while(0)
9052
9053#define ADD8(a, b, n) do { \
9054    uint32_t sum; \
9055    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
9056    RESULT(sum, n, 8); \
9057    if ((sum >> 8) == 1) \
9058        ge |= 1 << n; \
9059    } while(0)
9060
9061#define SUB16(a, b, n) do { \
9062    uint32_t sum; \
9063    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
9064    RESULT(sum, n, 16); \
9065    if ((sum >> 16) == 0) \
9066        ge |= 3 << (n * 2); \
9067    } while(0)
9068
9069#define SUB8(a, b, n) do { \
9070    uint32_t sum; \
9071    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
9072    RESULT(sum, n, 8); \
9073    if ((sum >> 8) == 0) \
9074        ge |= 1 << n; \
9075    } while(0)
9076
9077#define PFX u
9078#define ARITH_GE
9079
9080#include "op_addsub.h"
9081
9082/* Halved signed arithmetic.  */
9083#define ADD16(a, b, n) \
9084  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
9085#define SUB16(a, b, n) \
9086  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
9087#define ADD8(a, b, n) \
9088  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
9089#define SUB8(a, b, n) \
9090  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
9091#define PFX sh
9092
9093#include "op_addsub.h"
9094
9095/* Halved unsigned arithmetic.  */
9096#define ADD16(a, b, n) \
9097  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
9098#define SUB16(a, b, n) \
9099  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
9100#define ADD8(a, b, n) \
9101  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
9102#define SUB8(a, b, n) \
9103  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
9104#define PFX uh
9105
9106#include "op_addsub.h"
9107
9108static inline uint8_t do_usad(uint8_t a, uint8_t b)
9109{
9110    if (a > b)
9111        return a - b;
9112    else
9113        return b - a;
9114}
9115
9116/* Unsigned sum of absolute byte differences.  */
9117uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
9118{
9119    uint32_t sum;
9120    sum = do_usad(a, b);
9121    sum += do_usad(a >> 8, b >> 8);
9122    sum += do_usad(a >> 16, b >> 16);
9123    sum += do_usad(a >> 24, b >> 24);
9124    return sum;
9125}
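
/* Example: usad8(0x01020304, 0x04030201) sums the per-byte absolute
 * differences |4-1| + |3-2| + |2-3| + |1-4| = 3 + 1 + 1 + 3 = 8.
 */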
9126
9127/* For ARMv6 SEL instruction.  */
9128uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
9129{
9130    uint32_t mask;
9131
9132    mask = 0;
9133    if (flags & 1)
9134        mask |= 0xff;
9135    if (flags & 2)
9136        mask |= 0xff00;
9137    if (flags & 4)
9138        mask |= 0xff0000;
9139    if (flags & 8)
9140        mask |= 0xff000000;
9141    return (a & mask) | (b & ~mask);
9142}
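
/* Example: with flags = 0x5 (GE[0] and GE[2] set), mask becomes
 * 0x00ff00ff, so SEL returns bytes 0 and 2 from a and bytes 1 and 3
 * from b: (a & 0x00ff00ff) | (b & 0xff00ff00).
 */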
9143
9144/* VFP support.  We follow the convention used for VFP instructions:
9145   Single precision routines have a "s" suffix, double precision a
9146   "d" suffix.  */
9147
9148/* Convert host exception flags to vfp form.  */
9149static inline int vfp_exceptbits_from_host(int host_bits)
9150{
9151    int target_bits = 0;
9152
9153    if (host_bits & float_flag_invalid)
9154        target_bits |= 1;
9155    if (host_bits & float_flag_divbyzero)
9156        target_bits |= 2;
9157    if (host_bits & float_flag_overflow)
9158        target_bits |= 4;
9159    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
9160        target_bits |= 8;
9161    if (host_bits & float_flag_inexact)
9162        target_bits |= 0x10;
9163    if (host_bits & float_flag_input_denormal)
9164        target_bits |= 0x80;
9165    return target_bits;
9166}
9167
9168uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
9169{
9170    int i;
9171    uint32_t fpscr;
9172
9173    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
9174            | (env->vfp.vec_len << 16)
9175            | (env->vfp.vec_stride << 20);
9176    i = get_float_exception_flags(&env->vfp.fp_status);
9177    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
9178    fpscr |= vfp_exceptbits_from_host(i);
9179    return fpscr;
9180}
9181
9182uint32_t vfp_get_fpscr(CPUARMState *env)
9183{
9184    return HELPER(vfp_get_fpscr)(env);
9185}
9186
9187/* Convert vfp exception flags to target form.  */
9188static inline int vfp_exceptbits_to_host(int target_bits)
9189{
9190    int host_bits = 0;
9191
9192    if (target_bits & 1)
9193        host_bits |= float_flag_invalid;
9194    if (target_bits & 2)
9195        host_bits |= float_flag_divbyzero;
9196    if (target_bits & 4)
9197        host_bits |= float_flag_overflow;
9198    if (target_bits & 8)
9199        host_bits |= float_flag_underflow;
9200    if (target_bits & 0x10)
9201        host_bits |= float_flag_inexact;
9202    if (target_bits & 0x80)
9203        host_bits |= float_flag_input_denormal;
9204    return host_bits;
9205}
9206
9207void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
9208{
9209    int i;
9210    uint32_t changed;
9211
9212    changed = env->vfp.xregs[ARM_VFP_FPSCR];
9213    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
9214    env->vfp.vec_len = (val >> 16) & 7;
9215    env->vfp.vec_stride = (val >> 20) & 3;
9216
9217    changed ^= val;
9218    if (changed & (3 << 22)) {
9219        i = (val >> 22) & 3;
9220        switch (i) {
9221        case FPROUNDING_TIEEVEN:
9222            i = float_round_nearest_even;
9223            break;
9224        case FPROUNDING_POSINF:
9225            i = float_round_up;
9226            break;
9227        case FPROUNDING_NEGINF:
9228            i = float_round_down;
9229            break;
9230        case FPROUNDING_ZERO:
9231            i = float_round_to_zero;
9232            break;
9233        }
9234        set_float_rounding_mode(i, &env->vfp.fp_status);
9235    }
9236    if (changed & (1 << 24)) {
9237        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
9238        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
9239    }
9240    if (changed & (1 << 25))
9241        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
9242
9243    i = vfp_exceptbits_to_host(val);
9244    set_float_exception_flags(i, &env->vfp.fp_status);
9245    set_float_exception_flags(0, &env->vfp.standard_fp_status);
9246}
9247
9248void vfp_set_fpscr(CPUARMState *env, uint32_t val)
9249{
9250    HELPER(vfp_set_fpscr)(env, val);
9251}
9252
9253#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
9254
9255#define VFP_BINOP(name) \
9256float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
9257{ \
9258    float_status *fpst = fpstp; \
9259    return float32_ ## name(a, b, fpst); \
9260} \
9261float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
9262{ \
9263    float_status *fpst = fpstp; \
9264    return float64_ ## name(a, b, fpst); \
9265}
9266VFP_BINOP(add)
9267VFP_BINOP(sub)
9268VFP_BINOP(mul)
9269VFP_BINOP(div)
9270VFP_BINOP(min)
9271VFP_BINOP(max)
9272VFP_BINOP(minnum)
9273VFP_BINOP(maxnum)
9274#undef VFP_BINOP
9275
9276float32 VFP_HELPER(neg, s)(float32 a)
9277{
9278    return float32_chs(a);
9279}
9280
9281float64 VFP_HELPER(neg, d)(float64 a)
9282{
9283    return float64_chs(a);
9284}
9285
9286float32 VFP_HELPER(abs, s)(float32 a)
9287{
9288    return float32_abs(a);
9289}
9290
9291float64 VFP_HELPER(abs, d)(float64 a)
9292{
9293    return float64_abs(a);
9294}
9295
9296float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
9297{
9298    return float32_sqrt(a, &env->vfp.fp_status);
9299}
9300
9301float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
9302{
9303    return float64_sqrt(a, &env->vfp.fp_status);
9304}
9305
9306/* XXX: check quiet/signaling case */
9307#define DO_VFP_cmp(p, type) \
9308void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
9309{ \
9310    uint32_t flags; \
9311    switch (type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
9312    case 0: flags = 0x6; break; \
9313    case -1: flags = 0x8; break; \
9314    case 1: flags = 0x2; break; \
9315    default: case 2: flags = 0x3; break; \
9316    } \
9317    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
9318        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
9319} \
9320void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
9321{ \
9322    uint32_t flags; \
9323    switch (type ## _compare(a, b, &env->vfp.fp_status)) { \
9324    case 0: flags = 0x6; break; \
9325    case -1: flags = 0x8; break; \
9326    case 1: flags = 0x2; break; \
9327    default: case 2: flags = 0x3; break; \
9328    } \
9329    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
9330        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
9331}
9332DO_VFP_cmp(s, float32)
9333DO_VFP_cmp(d, float64)
9334#undef DO_VFP_cmp
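
/* The flag nibbles above land in FPSCR[31:28] = N,Z,C,V and follow the
 * architected VFP compare result encoding: equal -> 0x6 (Z,C),
 * less-than -> 0x8 (N), greater-than -> 0x2 (C), unordered -> 0x3 (C,V).
 */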
9335
9336/* Integer to float and float to integer conversions */
9337
9338#define CONV_ITOF(name, fsz, sign) \
9339    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
9340{ \
9341    float_status *fpst = fpstp; \
9342    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
9343}
9344
9345#define CONV_FTOI(name, fsz, sign, round) \
9346uint32_t HELPER(name)(float##fsz x, void *fpstp) \
9347{ \
9348    float_status *fpst = fpstp; \
9349    if (float##fsz##_is_any_nan(x)) { \
9350        float_raise(float_flag_invalid, fpst); \
9351        return 0; \
9352    } \
9353    return float##fsz##_to_##sign##int32##round(x, fpst); \
9354}
9355
9356#define FLOAT_CONVS(name, p, fsz, sign) \
9357CONV_ITOF(vfp_##name##to##p, fsz, sign) \
9358CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
9359CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
9360
9361FLOAT_CONVS(si, s, 32, )
9362FLOAT_CONVS(si, d, 64, )
9363FLOAT_CONVS(ui, s, 32, u)
9364FLOAT_CONVS(ui, d, 64, u)
9365
9366#undef CONV_ITOF
9367#undef CONV_FTOI
9368#undef FLOAT_CONVS
9369
9370/* floating point conversion */
9371float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
9372{
9373    float64 r = float32_to_float64(x, &env->vfp.fp_status);
9374    /* ARM requires that S<->D conversion of any kind of NaN generates
9375     * a quiet NaN by forcing the most significant frac bit to 1.
9376     */
9377    return float64_maybe_silence_nan(r, &env->vfp.fp_status);
9378}
9379
9380float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
9381{
9382    float32 r = float64_to_float32(x, &env->vfp.fp_status);
9383    /* ARM requires that S<->D conversion of any kind of NaN generates
9384     * a quiet NaN by forcing the most significant frac bit to 1.
9385     */
9386    return float32_maybe_silence_nan(r, &env->vfp.fp_status);
9387}
9388
9389/* VFP3 fixed point conversion.  */
9390#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
9391float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
9392                                     void *fpstp) \
9393{ \
9394    float_status *fpst = fpstp; \
9395    float##fsz tmp; \
9396    tmp = itype##_to_##float##fsz(x, fpst); \
9397    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
9398}
9399
9400/* Notice that we want only input-denormal exception flags from the
9401 * scalbn operation: the other possible flags (overflow+inexact if
9402 * we overflow to infinity, output-denormal) aren't correct for the
9403 * complete scale-and-convert operation.
9404 */
9405#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
9406uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
9407                                             uint32_t shift, \
9408                                             void *fpstp) \
9409{ \
9410    float_status *fpst = fpstp; \
9411    int old_exc_flags = get_float_exception_flags(fpst); \
9412    float##fsz tmp; \
9413    if (float##fsz##_is_any_nan(x)) { \
9414        float_raise(float_flag_invalid, fpst); \
9415        return 0; \
9416    } \
9417    tmp = float##fsz##_scalbn(x, shift, fpst); \
9418    old_exc_flags |= get_float_exception_flags(fpst) \
9419        & float_flag_input_denormal; \
9420    set_float_exception_flags(old_exc_flags, fpst); \
9421    return float##fsz##_to_##itype##round(tmp, fpst); \
9422}
9423
9424#define VFP_CONV_FIX(name, p, fsz, isz, itype)                   \
9425VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
9426VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
9427VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
9428
9429#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype)               \
9430VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype)                     \
9431VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
9432
9433VFP_CONV_FIX(sh, d, 64, 64, int16)
9434VFP_CONV_FIX(sl, d, 64, 64, int32)
9435VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
9436VFP_CONV_FIX(uh, d, 64, 64, uint16)
9437VFP_CONV_FIX(ul, d, 64, 64, uint32)
9438VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
9439VFP_CONV_FIX(sh, s, 32, 32, int16)
9440VFP_CONV_FIX(sl, s, 32, 32, int32)
9441VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
9442VFP_CONV_FIX(uh, s, 32, 32, uint16)
9443VFP_CONV_FIX(ul, s, 32, 32, uint32)
9444VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
9445#undef VFP_CONV_FIX
9446#undef VFP_CONV_FIX_FLOAT
9447#undef VFP_CONV_FLOAT_FIX_ROUND
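
/* Example of the fixed-point scaling above (illustrative): converting the
 * 16.16 fixed-point value 0x00028000 with vfp_sltod(x, 16, fpst) first
 * widens 0x28000 to 163840.0, then scalbn(tmp, -16) divides by 2^16,
 * yielding 2.5.
 */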
9448
9449/* Set the current fp rounding mode and return the old one.
9450 * The argument is a softfloat float_round_ value.
9451 */
9452uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
9453{
9454    float_status *fp_status = &env->vfp.fp_status;
9455
9456    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
9457    set_float_rounding_mode(rmode, fp_status);
9458
9459    return prev_rmode;
9460}
9461
9462/* Set the current fp rounding mode in the standard fp status and return
9463 * the old one. This is for NEON instructions that need to change the
9464 * rounding mode but wish to use the standard FPSCR values for everything
9465 * else. Always set the rounding mode back to the correct value after
9466 * modifying it.
9467 * The argument is a softfloat float_round_ value.
9468 */
9469uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
9470{
9471    float_status *fp_status = &env->vfp.standard_fp_status;
9472
9473    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
9474    set_float_rounding_mode(rmode, fp_status);
9475
9476    return prev_rmode;
9477}
9478
9479/* Half precision conversions.  */
9480static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
9481{
9482    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9483    float32 r = float16_to_float32(make_float16(a), ieee, s);
9484    if (ieee) {
9485        return float32_maybe_silence_nan(r, s);
9486    }
9487    return r;
9488}
9489
9490static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
9491{
9492    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9493    float16 r = float32_to_float16(a, ieee, s);
9494    if (ieee) {
9495        r = float16_maybe_silence_nan(r, s);
9496    }
9497    return float16_val(r);
9498}
9499
9500float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
9501{
9502    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
9503}
9504
9505uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
9506{
9507    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
9508}
9509
9510float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
9511{
9512    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
9513}
9514
9515uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
9516{
9517    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
9518}
9519
9520float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
9521{
9522    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9523    float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
9524    if (ieee) {
9525        return float64_maybe_silence_nan(r, &env->vfp.fp_status);
9526    }
9527    return r;
9528}
9529
9530uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
9531{
9532    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
9533    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
9534    if (ieee) {
9535        r = float16_maybe_silence_nan(r, &env->vfp.fp_status);
9536    }
9537    return float16_val(r);
9538}
9539
9540#define float32_two make_float32(0x40000000)
9541#define float32_three make_float32(0x40400000)
9542#define float32_one_point_five make_float32(0x3fc00000)
9543
9544float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
9545{
9546    float_status *s = &env->vfp.standard_fp_status;
9547    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
9548        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
9549        if (!(float32_is_zero(a) || float32_is_zero(b))) {
9550            float_raise(float_flag_input_denormal, s);
9551        }
9552        return float32_two;
9553    }
9554    return float32_sub(float32_two, float32_mul(a, b, s), s);
9555}
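
/* VRECPS above supplies the multiply-subtract term of a Newton-Raphson
 * iteration for 1/d: given an estimate x, x * (2 - d * x) is a refined
 * estimate, so callers compute x' = x * recps(d, x) (a usage sketch;
 * the special cases above keep the 0 * inf combination from turning the
 * iteration into NaNs).
 */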
9556
9557float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
9558{
9559    float_status *s = &env->vfp.standard_fp_status;
9560    float32 product;
9561    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
9562        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
9563        if (!(float32_is_zero(a) || float32_is_zero(b))) {
9564            float_raise(float_flag_input_denormal, s);
9565        }
9566        return float32_one_point_five;
9567    }
9568    product = float32_mul(a, b, s);
9569    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
9570}
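
/* Similarly, VRSQRTS supplies the step term for 1/sqrt(d): with estimate
 * x, x * (3 - d * x * x) / 2 refines it, so callers use
 * x' = x * rsqrts(d * x, x) (again a usage sketch).
 */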
9571
9572/* NEON helpers.  */
9573
9574/* Constants 256 and 512 are used in some helpers; we avoid relying on
9575 * int->float conversions at run-time.  */
9576#define float64_256 make_float64(0x4070000000000000LL)
9577#define float64_512 make_float64(0x4080000000000000LL)
9578#define float32_maxnorm make_float32(0x7f7fffff)
9579#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
9580
9581/* Reciprocal functions
9582 *
9583 * The algorithm that must be used to calculate the estimate
9584 * is specified by the ARM ARM, see FPRecipEstimate()
9585 */
9586
9587static float64 recip_estimate(float64 a, float_status *real_fp_status)
9588{
9589    /* These calculations mustn't set any fp exception flags,
9590     * so we use a local copy of the fp_status.
9591     */
9592    float_status dummy_status = *real_fp_status;
9593    float_status *s = &dummy_status;
9594    /* q = (int)(a * 512.0) */
9595    float64 q = float64_mul(float64_512, a, s);
9596    int64_t q_int = float64_to_int64_round_to_zero(q, s);
9597
9598    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
9599    q = int64_to_float64(q_int, s);
9600    q = float64_add(q, float64_half, s);
9601    q = float64_div(q, float64_512, s);
9602    q = float64_div(float64_one, q, s);
9603
9604    /* s = (int)(256.0 * r + 0.5) */
9605    q = float64_mul(q, float64_256, s);
9606    q = float64_add(q, float64_half, s);
9607    q_int = float64_to_int64_round_to_zero(q, s);
9608
9609    /* return (double)s / 256.0 */
9610    return float64_div(int64_to_float64(q_int, s), float64_256, s);
9611}
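
/* Worked example of the table lookup above: for a = 0.625,
 * q_int = (int)(0.625 * 512.0) = 320, r = 1.0 / (320.5 / 512.0)
 * ~= 1.597504, and (int)(256.0 * r + 0.5) = 409, so the estimate
 * returned is 409 / 256.0 = 1.59765625 (~= 1/0.625 = 1.6).
 */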
9612
9613/* Common wrapper to call recip_estimate */
9614static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
9615{
9616    uint64_t val64 = float64_val(num);
9617    uint64_t frac = extract64(val64, 0, 52);
9618    int64_t exp = extract64(val64, 52, 11);
9619    uint64_t sbit;
9620    float64 scaled, estimate;
9621
9622    /* Generate the scaled number for the estimate function */
9623    if (exp == 0) {
9624        if (extract64(frac, 51, 1) == 0) {
9625            exp = -1;
9626            frac = extract64(frac, 0, 50) << 2;
9627        } else {
9628            frac = extract64(frac, 0, 51) << 1;
9629        }
9630    }
9631
9632    /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
9633    scaled = make_float64((0x3feULL << 52)
9634                          | extract64(frac, 44, 8) << 44);
9635
9636    estimate = recip_estimate(scaled, fpst);
9637
9638    /* Build new result */
9639    val64 = float64_val(estimate);
9640    sbit = 0x8000000000000000ULL & val64;
9641    exp = off - exp;
9642    frac = extract64(val64, 0, 52);
9643
9644    if (exp == 0) {
9645        frac = 1ULL << 51 | extract64(frac, 1, 51);
9646    } else if (exp == -1) {
9647        frac = 1ULL << 50 | extract64(frac, 2, 50);
9648        exp = 0;
9649    }
9650
9651    return make_float64(sbit | (exp << 52) | frac);
9652}
9653
9654static bool round_to_inf(float_status *fpst, bool sign_bit)
9655{
9656    switch (fpst->float_rounding_mode) {
9657    case float_round_nearest_even: /* Round to Nearest */
9658        return true;
9659    case float_round_up: /* Round to +Inf */
9660        return !sign_bit;
9661    case float_round_down: /* Round to -Inf */
9662        return sign_bit;
9663    case float_round_to_zero: /* Round to Zero */
9664        return false;
9665    }
9666
9667    g_assert_not_reached();
9668}
9669
9670float32 HELPER(recpe_f32)(float32 input, void *fpstp)
9671{
9672    float_status *fpst = fpstp;
9673    float32 f32 = float32_squash_input_denormal(input, fpst);
9674    uint32_t f32_val = float32_val(f32);
9675    uint32_t f32_sbit = 0x80000000ULL & f32_val;
9676    int32_t f32_exp = extract32(f32_val, 23, 8);
9677    uint32_t f32_frac = extract32(f32_val, 0, 23);
9678    float64 f64, r64;
9679    uint64_t r64_val;
9680    int64_t r64_exp;
9681    uint64_t r64_frac;
9682
9683    if (float32_is_any_nan(f32)) {
9684        float32 nan = f32;
9685        if (float32_is_signaling_nan(f32, fpst)) {
9686            float_raise(float_flag_invalid, fpst);
9687            nan = float32_maybe_silence_nan(f32, fpst);
9688        }
9689        if (fpst->default_nan_mode) {
9690            nan = float32_default_nan(fpst);
9691        }
9692        return nan;
9693    } else if (float32_is_infinity(f32)) {
9694        return float32_set_sign(float32_zero, float32_is_neg(f32));
9695    } else if (float32_is_zero(f32)) {
9696        float_raise(float_flag_divbyzero, fpst);
9697        return float32_set_sign(float32_infinity, float32_is_neg(f32));
9698    } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
9699        /* Abs(value) < 2.0^-128 */
9700        float_raise(float_flag_overflow | float_flag_inexact, fpst);
9701        if (round_to_inf(fpst, f32_sbit)) {
9702            return float32_set_sign(float32_infinity, float32_is_neg(f32));
9703        } else {
9704            return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
9705        }
9706    } else if (f32_exp >= 253 && fpst->flush_to_zero) {
9707        float_raise(float_flag_underflow, fpst);
9708        return float32_set_sign(float32_zero, float32_is_neg(f32));
9709    }
9710
9711
9712    f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
9713    r64 = call_recip_estimate(f64, 253, fpst);
9714    r64_val = float64_val(r64);
9715    r64_exp = extract64(r64_val, 52, 11);
9716    r64_frac = extract64(r64_val, 0, 52);
9717
9718    /* result = sign : result_exp<7:0> : fraction<51:29>; */
9719    return make_float32(f32_sbit |
9720                        (r64_exp & 0xff) << 23 |
9721                        extract64(r64_frac, 29, 24));
9722}
9723
9724float64 HELPER(recpe_f64)(float64 input, void *fpstp)
9725{
9726    float_status *fpst = fpstp;
9727    float64 f64 = float64_squash_input_denormal(input, fpst);
9728    uint64_t f64_val = float64_val(f64);
9729    uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
9730    int64_t f64_exp = extract64(f64_val, 52, 11);
9731    float64 r64;
9732    uint64_t r64_val;
9733    int64_t r64_exp;
9734    uint64_t r64_frac;
9735
9736    /* Deal with any special cases */
9737    if (float64_is_any_nan(f64)) {
9738        float64 nan = f64;
9739        if (float64_is_signaling_nan(f64, fpst)) {
9740            float_raise(float_flag_invalid, fpst);
9741            nan = float64_maybe_silence_nan(f64, fpst);
9742        }
9743        if (fpst->default_nan_mode) {
9744            nan = float64_default_nan(fpst);
9745        }
9746        return nan;
9747    } else if (float64_is_infinity(f64)) {
9748        return float64_set_sign(float64_zero, float64_is_neg(f64));
9749    } else if (float64_is_zero(f64)) {
9750        float_raise(float_flag_divbyzero, fpst);
9751        return float64_set_sign(float64_infinity, float64_is_neg(f64));
9752    } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
9753        /* Abs(value) < 2.0^-1024 */
9754        float_raise(float_flag_overflow | float_flag_inexact, fpst);
9755        if (round_to_inf(fpst, f64_sbit)) {
9756            return float64_set_sign(float64_infinity, float64_is_neg(f64));
9757        } else {
9758            return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
9759        }
9760    } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
9761        float_raise(float_flag_underflow, fpst);
9762        return float64_set_sign(float64_zero, float64_is_neg(f64));
9763    }
9764
9765    r64 = call_recip_estimate(f64, 2045, fpst);
9766    r64_val = float64_val(r64);
9767    r64_exp = extract64(r64_val, 52, 11);
9768    r64_frac = extract64(r64_val, 0, 52);
9769
9770    /* result = sign : result_exp<10:0> : fraction<51:0> */
9771    return make_float64(f64_sbit |
9772                        ((r64_exp & 0x7ff) << 52) |
9773                        r64_frac);
9774}
9775
9776/* The algorithm that must be used to calculate the estimate
9777 * is specified by the ARM ARM.
9778 */
9779static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
9780{
9781    /* These calculations mustn't set any fp exception flags,
9782     * so we use a local copy of the fp_status.
9783     */
9784    float_status dummy_status = *real_fp_status;
9785    float_status *s = &dummy_status;
9786    float64 q;
9787    int64_t q_int;
9788
9789    if (float64_lt(a, float64_half, s)) {
9790        /* range 0.25 <= a < 0.5 */
9791
9792        /* a in units of 1/512 rounded down */
9793        /* q0 = (int)(a * 512.0);  */
9794        q = float64_mul(float64_512, a, s);
9795        q_int = float64_to_int64_round_to_zero(q, s);
9796
9797        /* reciprocal root r */
9798        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
9799        q = int64_to_float64(q_int, s);
9800        q = float64_add(q, float64_half, s);
9801        q = float64_div(q, float64_512, s);
9802        q = float64_sqrt(q, s);
9803        q = float64_div(float64_one, q, s);
9804    } else {
9805        /* range 0.5 <= a < 1.0 */
9806
9807        /* a in units of 1/256 rounded down */
9808        /* q1 = (int)(a * 256.0); */
9809        q = float64_mul(float64_256, a, s);
9810        q_int = float64_to_int64_round_to_zero(q, s);
9811
9812        /* reciprocal root r */
9813        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
9814        q = int64_to_float64(q_int, s);
9815        q = float64_add(q, float64_half, s);
9816        q = float64_div(q, float64_256, s);
9817        q = float64_sqrt(q, s);
9818        q = float64_div(float64_one, q, s);
9819    }
9820    /* r in units of 1/256 rounded to nearest */
9821    /* s = (int)(256.0 * r + 0.5); */
9822
9823    q = float64_mul(q, float64_256, s);
9824    q = float64_add(q, float64_half, s);
9825    q_int = float64_to_int64_round_to_zero(q, s);
9826
9827    /* return (double)s / 256.0; */
9828    return float64_div(int64_to_float64(q_int, s), float64_256, s);
9829}
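
/* A worked example of the estimate above (illustrative, not from the
 * source): for a = 0.25 the first branch gives q0 = (int)(0.25 * 512.0)
 * = 128 and r = 1.0 / sqrt(128.5 / 512.0) ~= 1.99611, so
 * s = (int)(256.0 * r + 0.5) = 511 and the returned estimate is
 * 511.0 / 256.0 = 1.99609375, against a true 1/sqrt(0.25) of 2.0.
 */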
9830
9831float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
9832{
9833    float_status *s = fpstp;
9834    float32 f32 = float32_squash_input_denormal(input, s);
9835    uint32_t val = float32_val(f32);
9836    uint32_t f32_sbit = 0x80000000 & val;
9837    int32_t f32_exp = extract32(val, 23, 8);
9838    uint32_t f32_frac = extract32(val, 0, 23);
9839    uint64_t f64_frac;
9840    uint64_t val64;
9841    int result_exp;
9842    float64 f64;
9843
9844    if (float32_is_any_nan(f32)) {
9845        float32 nan = f32;
9846        if (float32_is_signaling_nan(f32, s)) {
9847            float_raise(float_flag_invalid, s);
9848            nan = float32_maybe_silence_nan(f32, s);
9849        }
9850        if (s->default_nan_mode) {
9851            nan = float32_default_nan(s);
9852        }
9853        return nan;
9854    } else if (float32_is_zero(f32)) {
9855        float_raise(float_flag_divbyzero, s);
9856        return float32_set_sign(float32_infinity, float32_is_neg(f32));
9857    } else if (float32_is_neg(f32)) {
9858        float_raise(float_flag_invalid, s);
9859        return float32_default_nan(s);
9860    } else if (float32_is_infinity(f32)) {
9861        return float32_zero;
9862    }
9863
9864    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
9865     * preserving the parity of the exponent.  */
9866
9867    f64_frac = ((uint64_t) f32_frac) << 29;
9868    if (f32_exp == 0) {
9869        while (extract64(f64_frac, 51, 1) == 0) {
9870            f64_frac = f64_frac << 1;
9871            f32_exp = f32_exp - 1;
9872        }
9873        f64_frac = extract64(f64_frac, 0, 51) << 1;
9874    }
9875
9876    if (extract64(f32_exp, 0, 1) == 0) {
9877        f64 = make_float64(((uint64_t) f32_sbit) << 32
9878                           | (0x3feULL << 52)
9879                           | f64_frac);
9880    } else {
9881        f64 = make_float64(((uint64_t) f32_sbit) << 32
9882                           | (0x3fdULL << 52)
9883                           | f64_frac);
9884    }
9885
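    /* An illustrative reading of the constant 380 (== 3 * 127 - 1): for
     * x == 2^E * m the biased single-precision exponent of 1/sqrt(x) is
     * approximately (3 * bias - 1 - input_exp) / 2.
     */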
9886    result_exp = (380 - f32_exp) / 2;
9887
9888    f64 = recip_sqrt_estimate(f64, s);
9889
9890    val64 = float64_val(f64);
9891
9892    val = ((result_exp & 0xff) << 23)
9893        | ((val64 >> 29) & 0x7fffff);
9894    return make_float32(val);
9895}
9896
9897float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
9898{
9899    float_status *s = fpstp;
9900    float64 f64 = float64_squash_input_denormal(input, s);
9901    uint64_t val = float64_val(f64);
9902    uint64_t f64_sbit = 0x8000000000000000ULL & val;
9903    int64_t f64_exp = extract64(val, 52, 11);
9904    uint64_t f64_frac = extract64(val, 0, 52);
9905    int64_t result_exp;
9906    uint64_t result_frac;
9907
9908    if (float64_is_any_nan(f64)) {
9909        float64 nan = f64;
9910        if (float64_is_signaling_nan(f64, s)) {
9911            float_raise(float_flag_invalid, s);
9912            nan = float64_maybe_silence_nan(f64, s);
9913        }
9914        if (s->default_nan_mode) {
9915            nan = float64_default_nan(s);
9916        }
9917        return nan;
9918    } else if (float64_is_zero(f64)) {
9919        float_raise(float_flag_divbyzero, s);
9920        return float64_set_sign(float64_infinity, float64_is_neg(f64));
9921    } else if (float64_is_neg(f64)) {
9922        float_raise(float_flag_invalid, s);
9923        return float64_default_nan(s);
9924    } else if (float64_is_infinity(f64)) {
9925        return float64_zero;
9926    }
9927
9928    /* Scale and normalize to a double-precision value between 0.25 and 1.0,
9929     * preserving the parity of the exponent.  */
9930
9931    if (f64_exp == 0) {
9932        while (extract64(f64_frac, 51, 1) == 0) {
9933            f64_frac = f64_frac << 1;
9934            f64_exp = f64_exp - 1;
9935        }
9936        f64_frac = extract64(f64_frac, 0, 51) << 1;
9937    }
9938
9939    if (extract64(f64_exp, 0, 1) == 0) {
9940        f64 = make_float64(f64_sbit
9941                           | (0x3feULL << 52)
9942                           | f64_frac);
9943    } else {
9944        f64 = make_float64(f64_sbit
9945                           | (0x3fdULL << 52)
9946                           | f64_frac);
9947    }
9948
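    /* 3068 == 3 * 1023 - 1: the double-precision analogue of the
     * single-precision constant 380 above.
     */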
9949    result_exp = (3068 - f64_exp) / 2;
9950
9951    f64 = recip_sqrt_estimate(f64, s);
9952
9953    result_frac = extract64(float64_val(f64), 0, 52);
9954
9955    return make_float64(f64_sbit |
9956                        ((result_exp & 0x7ff) << 52) |
9957                        result_frac);
9958}
9959
9960uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
9961{
9962    float_status *s = fpstp;
9963    float64 f64;
9964
9965    if ((a & 0x80000000) == 0) {
9966        return 0xffffffff;
9967    }
9968
9969    f64 = make_float64((0x3feULL << 52)
9970                       | ((int64_t)(a & 0x7fffffff) << 21));
9971
9972    f64 = recip_estimate(f64, s);
9973
9974    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
9975}
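
/* An illustrative note on the URECPE number formats (not normative): the
 * input is treated as 0.32 unsigned fixed point, so bit 31 has weight 0.5
 * and anything below 0.5 saturates to 0xffffffff; the result lies in
 * [1.0, 2.0) as 1.31 fixed point, which is why 0x80000000, the integer
 * bit, is OR'ed into the return value.
 */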
9976
9977uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
9978{
9979    float_status *fpst = fpstp;
9980    float64 f64;
9981
9982    if ((a & 0xc0000000) == 0) {
9983        return 0xffffffff;
9984    }
9985
9986    if (a & 0x80000000) {
9987        f64 = make_float64((0x3feULL << 52)
9988                           | ((uint64_t)(a & 0x7fffffff) << 21));
9989    } else { /* bits 31-30 == '01' */
9990        f64 = make_float64((0x3fdULL << 52)
9991                           | ((uint64_t)(a & 0x3fffffff) << 22));
9992    }
9993
9994    f64 = recip_sqrt_estimate(f64, fpst);
9995
9996    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
9997}
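
/* The URSQRTE analogue (again illustrative): inputs below 0.25, i.e. with
 * bits 31:30 clear, saturate; otherwise the value is scaled into
 * [0.5, 1.0) or [0.25, 0.5) as appropriate, and the estimate comes back
 * in [1.0, 2.0) as 1.31 fixed point.
 */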
9998
9999/* VFPv4 fused multiply-accumulate */
10000float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
10001{
10002    float_status *fpst = fpstp;
10003    return float32_muladd(a, b, c, 0, fpst);
10004}
10005
10006float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
10007{
10008    float_status *fpst = fpstp;
10009    return float64_muladd(a, b, c, 0, fpst);
10010}
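
/* float32_muladd()/float64_muladd() perform a true fused multiply-add:
 * a * b + c is rounded once.  A sketch of the difference against a
 * chained sequence (hypothetical values, not code from this file):
 *
 *     float32 p = float32_mul(a, b, fpst);            // rounds a * b
 *     float32 chained = float32_add(p, c, fpst);      // rounds again
 *     float32 fused = float32_muladd(a, b, c, 0, fpst);  // one rounding
 *
 * chained and fused can differ in the last bit, which is exactly what the
 * VFMA family of instructions is architected to avoid.
 */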
10011
10012/* ARMv8 round to integral */
10013float32 HELPER(rints_exact)(float32 x, void *fp_status)
10014{
10015    return float32_round_to_int(x, fp_status);
10016}
10017
10018float64 HELPER(rintd_exact)(float64 x, void *fp_status)
10019{
10020    return float64_round_to_int(x, fp_status);
10021}
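
/* The *_exact helpers above back FRINTX, which is architected to signal
 * Inexact when the result differs from the input; the helpers below serve
 * the other FRINT encodings, which round identically but must leave the
 * Inexact flag untouched, hence the flag suppression.
 */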
10022
10023float32 HELPER(rints)(float32 x, void *fp_status)
10024{
10025    int old_flags = get_float_exception_flags(fp_status), new_flags;
10026    float32 ret;
10027
10028    ret = float32_round_to_int(x, fp_status);
10029
10030    /* Suppress any inexact exceptions the conversion produced */
10031    if (!(old_flags & float_flag_inexact)) {
10032        new_flags = get_float_exception_flags(fp_status);
10033        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
10034    }
10035
10036    return ret;
10037}
10038
10039float64 HELPER(rintd)(float64 x, void *fp_status)
10040{
10041    int old_flags = get_float_exception_flags(fp_status), new_flags;
10042    float64 ret;
10043
10044    ret = float64_round_to_int(x, fp_status);
10045
10048    /* Suppress any inexact exceptions the conversion produced */
10049    if (!(old_flags & float_flag_inexact)) {
10050        new_flags = get_float_exception_flags(fp_status);
10051        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
10052    }
10053
10054    return ret;
10055}
10056
10057/* Convert ARM rounding mode to softfloat */
10058int arm_rmode_to_sf(int rmode)
10059{
10060    switch (rmode) {
10061    case FPROUNDING_TIEAWAY:
10062        rmode = float_round_ties_away;
10063        break;
10064    case FPROUNDING_ODD:
10065        /* FIXME: add support for ODD; fall through to ties-to-even */
10066        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
10067                      rmode);
10068    case FPROUNDING_TIEEVEN:
10069    default:
10070        rmode = float_round_nearest_even;
10071        break;
10072    case FPROUNDING_POSINF:
10073        rmode = float_round_up;
10074        break;
10075    case FPROUNDING_NEGINF:
10076        rmode = float_round_down;
10077        break;
10078    case FPROUNDING_ZERO:
10079        rmode = float_round_to_zero;
10080        break;
10081    }
10082    return rmode;
10083}
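
/* A minimal usage sketch (hypothetical, not code from this function's
 * callers): a temporary rounding mode is converted once and swapped in
 * around the operation:
 *
 *     int old = get_float_rounding_mode(fpst);
 *     set_float_rounding_mode(arm_rmode_to_sf(FPROUNDING_ZERO), fpst);
 *     ret = float64_round_to_int(x, fpst);
 *     set_float_rounding_mode(old, fpst);
 */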
10084
10085/* CRC helpers.
10086 * The upper bytes of val (above the number specified by 'bytes') must have
10087 * been zeroed out by the caller.
10088 */
10089uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
10090{
10091    uint8_t buf[4];
10092
10093    stl_le_p(buf, val);
10094
10095    /* zlib's crc32() inverts the CRC at both entry and exit; undo both. */
10096    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
10097}
10098
10099uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
10100{
10101    uint8_t buf[4];
10102
10103    stl_le_p(buf, val);
10104
10105    /* The Linux-derived crc32c() inverts only its output; undo that here. */
10106    return crc32c(acc, buf, bytes) ^ 0xffffffff;
10107}
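
/* Both CRC helpers back the A32 and A64 CRC32/CRC32C instructions; e.g. a
 * guest CRC32W ends up here with bytes == 4 and val holding the
 * zero-extended source register (an illustrative mapping; the decode
 * lives in the translators).
 */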
10108