qemu/target/arm/helper.c
<<
>>
Prefs
   1/*
   2 * ARM generic helpers.
   3 *
   4 * This code is licensed under the GNU GPL v2 or later.
   5 *
   6 * SPDX-License-Identifier: GPL-2.0-or-later
   7 */
   8
   9#include "qemu/osdep.h"
  10#include "qemu/units.h"
  11#include "target/arm/idau.h"
  12#include "trace.h"
  13#include "cpu.h"
  14#include "internals.h"
  15#include "exec/gdbstub.h"
  16#include "exec/helper-proto.h"
  17#include "qemu/host-utils.h"
  18#include "qemu/main-loop.h"
  19#include "qemu/bitops.h"
  20#include "qemu/crc32c.h"
  21#include "qemu/qemu-print.h"
  22#include "exec/exec-all.h"
  23#include <zlib.h> /* For crc32 */
  24#include "hw/irq.h"
  25#include "hw/semihosting/semihost.h"
  26#include "sysemu/cpus.h"
  27#include "sysemu/cpu-timers.h"
  28#include "sysemu/kvm.h"
  29#include "sysemu/tcg.h"
  30#include "qemu/range.h"
  31#include "qapi/qapi-commands-machine-target.h"
  32#include "qapi/error.h"
  33#include "qemu/guest-random.h"
  34#ifdef CONFIG_TCG
  35#include "arm_ldst.h"
  36#include "exec/cpu_ldst.h"
  37#endif
  38
  39#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
  40
  41#ifndef CONFIG_USER_ONLY
  42
  43static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
  44                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
  45                               bool s1_is_el0,
  46                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
  47                               target_ulong *page_size_ptr,
  48                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
  49    __attribute__((nonnull));
  50#endif
  51
  52static void switch_mode(CPUARMState *env, int mode);
  53static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx);
  54
  55static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
  56{
  57    ARMCPU *cpu = env_archcpu(env);
  58    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
  59
  60    /* VFP data registers are always little-endian.  */
  61    if (reg < nregs) {
  62        return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg));
  63    }
  64    if (arm_feature(env, ARM_FEATURE_NEON)) {
  65        /* Aliases for Q regs.  */
  66        nregs += 16;
  67        if (reg < nregs) {
  68            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
  69            return gdb_get_reg128(buf, q[0], q[1]);
  70        }
  71    }
  72    switch (reg - nregs) {
  73    case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break;
  74    case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break;
  75    case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break;
  76    }
  77    return 0;
  78}
  79
  80static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
  81{
  82    ARMCPU *cpu = env_archcpu(env);
  83    int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16;
  84
  85    if (reg < nregs) {
  86        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
  87        return 8;
  88    }
  89    if (arm_feature(env, ARM_FEATURE_NEON)) {
  90        nregs += 16;
  91        if (reg < nregs) {
  92            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
  93            q[0] = ldq_le_p(buf);
  94            q[1] = ldq_le_p(buf + 8);
  95            return 16;
  96        }
  97    }
  98    switch (reg - nregs) {
  99    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
 100    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
 101    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
 102    }
 103    return 0;
 104}
 105
 106static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg)
 107{
 108    switch (reg) {
 109    case 0 ... 31:
 110    {
 111        /* 128 bit FP register - quads are in LE order */
 112        uint64_t *q = aa64_vfp_qreg(env, reg);
 113        return gdb_get_reg128(buf, q[1], q[0]);
 114    }
 115    case 32:
 116        /* FPSR */
 117        return gdb_get_reg32(buf, vfp_get_fpsr(env));
 118    case 33:
 119        /* FPCR */
 120        return gdb_get_reg32(buf,vfp_get_fpcr(env));
 121    default:
 122        return 0;
 123    }
 124}
 125
 126static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
 127{
 128    switch (reg) {
 129    case 0 ... 31:
 130        /* 128 bit FP register */
 131        {
 132            uint64_t *q = aa64_vfp_qreg(env, reg);
 133            q[0] = ldq_le_p(buf);
 134            q[1] = ldq_le_p(buf + 8);
 135            return 16;
 136        }
 137    case 32:
 138        /* FPSR */
 139        vfp_set_fpsr(env, ldl_p(buf));
 140        return 4;
 141    case 33:
 142        /* FPCR */
 143        vfp_set_fpcr(env, ldl_p(buf));
 144        return 4;
 145    default:
 146        return 0;
 147    }
 148}
 149
 150static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
 151{
 152    assert(ri->fieldoffset);
 153    if (cpreg_field_is_64bit(ri)) {
 154        return CPREG_FIELD64(env, ri);
 155    } else {
 156        return CPREG_FIELD32(env, ri);
 157    }
 158}
 159
 160static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
 161                      uint64_t value)
 162{
 163    assert(ri->fieldoffset);
 164    if (cpreg_field_is_64bit(ri)) {
 165        CPREG_FIELD64(env, ri) = value;
 166    } else {
 167        CPREG_FIELD32(env, ri) = value;
 168    }
 169}
 170
 171static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
 172{
 173    return (char *)env + ri->fieldoffset;
 174}
 175
 176uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
 177{
 178    /* Raw read of a coprocessor register (as needed for migration, etc). */
 179    if (ri->type & ARM_CP_CONST) {
 180        return ri->resetvalue;
 181    } else if (ri->raw_readfn) {
 182        return ri->raw_readfn(env, ri);
 183    } else if (ri->readfn) {
 184        return ri->readfn(env, ri);
 185    } else {
 186        return raw_read(env, ri);
 187    }
 188}
 189
 190static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
 191                             uint64_t v)
 192{
 193    /* Raw write of a coprocessor register (as needed for migration, etc).
 194     * Note that constant registers are treated as write-ignored; the
 195     * caller should check for success by whether a readback gives the
 196     * value written.
 197     */
 198    if (ri->type & ARM_CP_CONST) {
 199        return;
 200    } else if (ri->raw_writefn) {
 201        ri->raw_writefn(env, ri, v);
 202    } else if (ri->writefn) {
 203        ri->writefn(env, ri, v);
 204    } else {
 205        raw_write(env, ri, v);
 206    }
 207}
 208
 209/**
 210 * arm_get/set_gdb_*: get/set a gdb register
 211 * @env: the CPU state
 212 * @buf: a buffer to copy to/from
 213 * @reg: register number (offset from start of group)
 214 *
 215 * We return the number of bytes copied
 216 */
 217
 218static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg)
 219{
 220    ARMCPU *cpu = env_archcpu(env);
 221    const ARMCPRegInfo *ri;
 222    uint32_t key;
 223
 224    key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg];
 225    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
 226    if (ri) {
 227        if (cpreg_field_is_64bit(ri)) {
 228            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
 229        } else {
 230            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
 231        }
 232    }
 233    return 0;
 234}
 235
 236static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
 237{
 238    return 0;
 239}
 240
 241#ifdef TARGET_AARCH64
 242static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg)
 243{
 244    ARMCPU *cpu = env_archcpu(env);
 245
 246    switch (reg) {
 247    /* The first 32 registers are the zregs */
 248    case 0 ... 31:
 249    {
 250        int vq, len = 0;
 251        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
 252            len += gdb_get_reg128(buf,
 253                                  env->vfp.zregs[reg].d[vq * 2 + 1],
 254                                  env->vfp.zregs[reg].d[vq * 2]);
 255        }
 256        return len;
 257    }
 258    case 32:
 259        return gdb_get_reg32(buf, vfp_get_fpsr(env));
 260    case 33:
 261        return gdb_get_reg32(buf, vfp_get_fpcr(env));
 262    /* then 16 predicates and the ffr */
 263    case 34 ... 50:
 264    {
 265        int preg = reg - 34;
 266        int vq, len = 0;
 267        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
 268            len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]);
 269        }
 270        return len;
 271    }
 272    case 51:
 273    {
 274        /*
 275         * We report in Vector Granules (VG) which is 64bit in a Z reg
 276         * while the ZCR works in Vector Quads (VQ) which is 128bit chunks.
 277         */
 278        int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1;
 279        return gdb_get_reg32(buf, vq * 2);
 280    }
 281    default:
 282        /* gdbstub asked for something out our range */
 283        qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg);
 284        break;
 285    }
 286
 287    return 0;
 288}
 289
 290static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg)
 291{
 292    ARMCPU *cpu = env_archcpu(env);
 293
 294    /* The first 32 registers are the zregs */
 295    switch (reg) {
 296    /* The first 32 registers are the zregs */
 297    case 0 ... 31:
 298    {
 299        int vq, len = 0;
 300        uint64_t *p = (uint64_t *) buf;
 301        for (vq = 0; vq < cpu->sve_max_vq; vq++) {
 302            env->vfp.zregs[reg].d[vq * 2 + 1] = *p++;
 303            env->vfp.zregs[reg].d[vq * 2] = *p++;
 304            len += 16;
 305        }
 306        return len;
 307    }
 308    case 32:
 309        vfp_set_fpsr(env, *(uint32_t *)buf);
 310        return 4;
 311    case 33:
 312        vfp_set_fpcr(env, *(uint32_t *)buf);
 313        return 4;
 314    case 34 ... 50:
 315    {
 316        int preg = reg - 34;
 317        int vq, len = 0;
 318        uint64_t *p = (uint64_t *) buf;
 319        for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) {
 320            env->vfp.pregs[preg].p[vq / 4] = *p++;
 321            len += 8;
 322        }
 323        return len;
 324    }
 325    case 51:
 326        /* cannot set vg via gdbstub */
 327        return 0;
 328    default:
 329        /* gdbstub asked for something out our range */
 330        break;
 331    }
 332
 333    return 0;
 334}
 335#endif /* TARGET_AARCH64 */
 336
 337static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
 338{
 339   /* Return true if the regdef would cause an assertion if you called
 340    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
 341    * program bug for it not to have the NO_RAW flag).
 342    * NB that returning false here doesn't necessarily mean that calling
 343    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
 344    * read/write access functions which are safe for raw use" from "has
 345    * read/write access functions which have side effects but has forgotten
 346    * to provide raw access functions".
 347    * The tests here line up with the conditions in read/write_raw_cp_reg()
 348    * and assertions in raw_read()/raw_write().
 349    */
 350    if ((ri->type & ARM_CP_CONST) ||
 351        ri->fieldoffset ||
 352        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
 353        return false;
 354    }
 355    return true;
 356}
 357
 358bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
 359{
 360    /* Write the coprocessor state from cpu->env to the (index,value) list. */
 361    int i;
 362    bool ok = true;
 363
 364    for (i = 0; i < cpu->cpreg_array_len; i++) {
 365        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
 366        const ARMCPRegInfo *ri;
 367        uint64_t newval;
 368
 369        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 370        if (!ri) {
 371            ok = false;
 372            continue;
 373        }
 374        if (ri->type & ARM_CP_NO_RAW) {
 375            continue;
 376        }
 377
 378        newval = read_raw_cp_reg(&cpu->env, ri);
 379        if (kvm_sync) {
 380            /*
 381             * Only sync if the previous list->cpustate sync succeeded.
 382             * Rather than tracking the success/failure state for every
 383             * item in the list, we just recheck "does the raw write we must
 384             * have made in write_list_to_cpustate() read back OK" here.
 385             */
 386            uint64_t oldval = cpu->cpreg_values[i];
 387
 388            if (oldval == newval) {
 389                continue;
 390            }
 391
 392            write_raw_cp_reg(&cpu->env, ri, oldval);
 393            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
 394                continue;
 395            }
 396
 397            write_raw_cp_reg(&cpu->env, ri, newval);
 398        }
 399        cpu->cpreg_values[i] = newval;
 400    }
 401    return ok;
 402}
 403
 404bool write_list_to_cpustate(ARMCPU *cpu)
 405{
 406    int i;
 407    bool ok = true;
 408
 409    for (i = 0; i < cpu->cpreg_array_len; i++) {
 410        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
 411        uint64_t v = cpu->cpreg_values[i];
 412        const ARMCPRegInfo *ri;
 413
 414        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 415        if (!ri) {
 416            ok = false;
 417            continue;
 418        }
 419        if (ri->type & ARM_CP_NO_RAW) {
 420            continue;
 421        }
 422        /* Write value and confirm it reads back as written
 423         * (to catch read-only registers and partially read-only
 424         * registers where the incoming migration value doesn't match)
 425         */
 426        write_raw_cp_reg(&cpu->env, ri, v);
 427        if (read_raw_cp_reg(&cpu->env, ri) != v) {
 428            ok = false;
 429        }
 430    }
 431    return ok;
 432}
 433
 434static void add_cpreg_to_list(gpointer key, gpointer opaque)
 435{
 436    ARMCPU *cpu = opaque;
 437    uint64_t regidx;
 438    const ARMCPRegInfo *ri;
 439
 440    regidx = *(uint32_t *)key;
 441    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 442
 443    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
 444        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
 445        /* The value array need not be initialized at this point */
 446        cpu->cpreg_array_len++;
 447    }
 448}
 449
 450static void count_cpreg(gpointer key, gpointer opaque)
 451{
 452    ARMCPU *cpu = opaque;
 453    uint64_t regidx;
 454    const ARMCPRegInfo *ri;
 455
 456    regidx = *(uint32_t *)key;
 457    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 458
 459    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
 460        cpu->cpreg_array_len++;
 461    }
 462}
 463
 464static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
 465{
 466    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
 467    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
 468
 469    if (aidx > bidx) {
 470        return 1;
 471    }
 472    if (aidx < bidx) {
 473        return -1;
 474    }
 475    return 0;
 476}
 477
 478void init_cpreg_list(ARMCPU *cpu)
 479{
 480    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
 481     * Note that we require cpreg_tuples[] to be sorted by key ID.
 482     */
 483    GList *keys;
 484    int arraylen;
 485
 486    keys = g_hash_table_get_keys(cpu->cp_regs);
 487    keys = g_list_sort(keys, cpreg_key_compare);
 488
 489    cpu->cpreg_array_len = 0;
 490
 491    g_list_foreach(keys, count_cpreg, cpu);
 492
 493    arraylen = cpu->cpreg_array_len;
 494    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
 495    cpu->cpreg_values = g_new(uint64_t, arraylen);
 496    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
 497    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
 498    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
 499    cpu->cpreg_array_len = 0;
 500
 501    g_list_foreach(keys, add_cpreg_to_list, cpu);
 502
 503    assert(cpu->cpreg_array_len == arraylen);
 504
 505    g_list_free(keys);
 506}
 507
 508/*
 509 * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0.
 510 */
 511static CPAccessResult access_el3_aa32ns(CPUARMState *env,
 512                                        const ARMCPRegInfo *ri,
 513                                        bool isread)
 514{
 515    if (!is_a64(env) && arm_current_el(env) == 3 &&
 516        arm_is_secure_below_el3(env)) {
 517        return CP_ACCESS_TRAP_UNCATEGORIZED;
 518    }
 519    return CP_ACCESS_OK;
 520}
 521
 522/* Some secure-only AArch32 registers trap to EL3 if used from
 523 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 524 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 525 * We assume that the .access field is set to PL1_RW.
 526 */
 527static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
 528                                            const ARMCPRegInfo *ri,
 529                                            bool isread)
 530{
 531    if (arm_current_el(env) == 3) {
 532        return CP_ACCESS_OK;
 533    }
 534    if (arm_is_secure_below_el3(env)) {
 535        return CP_ACCESS_TRAP_EL3;
 536    }
 537    /* This will be EL1 NS and EL2 NS, which just UNDEF */
 538    return CP_ACCESS_TRAP_UNCATEGORIZED;
 539}
 540
 541/* Check for traps to "powerdown debug" registers, which are controlled
 542 * by MDCR.TDOSA
 543 */
 544static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
 545                                   bool isread)
 546{
 547    int el = arm_current_el(env);
 548    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
 549        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 550        (arm_hcr_el2_eff(env) & HCR_TGE);
 551
 552    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
 553        return CP_ACCESS_TRAP_EL2;
 554    }
 555    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
 556        return CP_ACCESS_TRAP_EL3;
 557    }
 558    return CP_ACCESS_OK;
 559}
 560
 561/* Check for traps to "debug ROM" registers, which are controlled
 562 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 563 */
 564static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
 565                                  bool isread)
 566{
 567    int el = arm_current_el(env);
 568    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
 569        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 570        (arm_hcr_el2_eff(env) & HCR_TGE);
 571
 572    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
 573        return CP_ACCESS_TRAP_EL2;
 574    }
 575    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
 576        return CP_ACCESS_TRAP_EL3;
 577    }
 578    return CP_ACCESS_OK;
 579}
 580
 581/* Check for traps to general debug registers, which are controlled
 582 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 583 */
 584static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
 585                                  bool isread)
 586{
 587    int el = arm_current_el(env);
 588    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
 589        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 590        (arm_hcr_el2_eff(env) & HCR_TGE);
 591
 592    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
 593        return CP_ACCESS_TRAP_EL2;
 594    }
 595    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
 596        return CP_ACCESS_TRAP_EL3;
 597    }
 598    return CP_ACCESS_OK;
 599}
 600
 601/* Check for traps to performance monitor registers, which are controlled
 602 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 603 */
 604static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
 605                                 bool isread)
 606{
 607    int el = arm_current_el(env);
 608
 609    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
 610        && !arm_is_secure_below_el3(env)) {
 611        return CP_ACCESS_TRAP_EL2;
 612    }
 613    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
 614        return CP_ACCESS_TRAP_EL3;
 615    }
 616    return CP_ACCESS_OK;
 617}
 618
 619/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM.  */
 620static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
 621                                      bool isread)
 622{
 623    if (arm_current_el(env) == 1) {
 624        uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
 625        if (arm_hcr_el2_eff(env) & trap) {
 626            return CP_ACCESS_TRAP_EL2;
 627        }
 628    }
 629    return CP_ACCESS_OK;
 630}
 631
 632/* Check for traps from EL1 due to HCR_EL2.TSW.  */
 633static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri,
 634                                 bool isread)
 635{
 636    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) {
 637        return CP_ACCESS_TRAP_EL2;
 638    }
 639    return CP_ACCESS_OK;
 640}
 641
 642/* Check for traps from EL1 due to HCR_EL2.TACR.  */
 643static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri,
 644                                  bool isread)
 645{
 646    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) {
 647        return CP_ACCESS_TRAP_EL2;
 648    }
 649    return CP_ACCESS_OK;
 650}
 651
 652/* Check for traps from EL1 due to HCR_EL2.TTLB. */
 653static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri,
 654                                  bool isread)
 655{
 656    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) {
 657        return CP_ACCESS_TRAP_EL2;
 658    }
 659    return CP_ACCESS_OK;
 660}
 661
 662static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 663{
 664    ARMCPU *cpu = env_archcpu(env);
 665
 666    raw_write(env, ri, value);
 667    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
 668}
 669
 670static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 671{
 672    ARMCPU *cpu = env_archcpu(env);
 673
 674    if (raw_read(env, ri) != value) {
 675        /* Unlike real hardware the qemu TLB uses virtual addresses,
 676         * not modified virtual addresses, so this causes a TLB flush.
 677         */
 678        tlb_flush(CPU(cpu));
 679        raw_write(env, ri, value);
 680    }
 681}
 682
 683static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 684                             uint64_t value)
 685{
 686    ARMCPU *cpu = env_archcpu(env);
 687
 688    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
 689        && !extended_addresses_enabled(env)) {
 690        /* For VMSA (when not using the LPAE long descriptor page table
 691         * format) this register includes the ASID, so do a TLB flush.
 692         * For PMSA it is purely a process ID and no action is needed.
 693         */
 694        tlb_flush(CPU(cpu));
 695    }
 696    raw_write(env, ri, value);
 697}
 698
 699/* IS variants of TLB operations must affect all cores */
 700static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 701                             uint64_t value)
 702{
 703    CPUState *cs = env_cpu(env);
 704
 705    tlb_flush_all_cpus_synced(cs);
 706}
 707
 708static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 709                             uint64_t value)
 710{
 711    CPUState *cs = env_cpu(env);
 712
 713    tlb_flush_all_cpus_synced(cs);
 714}
 715
 716static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 717                             uint64_t value)
 718{
 719    CPUState *cs = env_cpu(env);
 720
 721    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 722}
 723
 724static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 725                             uint64_t value)
 726{
 727    CPUState *cs = env_cpu(env);
 728
 729    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 730}
 731
 732/*
 733 * Non-IS variants of TLB operations are upgraded to
 734 * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to
 735 * force broadcast of these operations.
 736 */
 737static bool tlb_force_broadcast(CPUARMState *env)
 738{
 739    return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB);
 740}
 741
 742static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
 743                          uint64_t value)
 744{
 745    /* Invalidate all (TLBIALL) */
 746    CPUState *cs = env_cpu(env);
 747
 748    if (tlb_force_broadcast(env)) {
 749        tlb_flush_all_cpus_synced(cs);
 750    } else {
 751        tlb_flush(cs);
 752    }
 753}
 754
 755static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
 756                          uint64_t value)
 757{
 758    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
 759    CPUState *cs = env_cpu(env);
 760
 761    value &= TARGET_PAGE_MASK;
 762    if (tlb_force_broadcast(env)) {
 763        tlb_flush_page_all_cpus_synced(cs, value);
 764    } else {
 765        tlb_flush_page(cs, value);
 766    }
 767}
 768
 769static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
 770                           uint64_t value)
 771{
 772    /* Invalidate by ASID (TLBIASID) */
 773    CPUState *cs = env_cpu(env);
 774
 775    if (tlb_force_broadcast(env)) {
 776        tlb_flush_all_cpus_synced(cs);
 777    } else {
 778        tlb_flush(cs);
 779    }
 780}
 781
 782static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
 783                           uint64_t value)
 784{
 785    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
 786    CPUState *cs = env_cpu(env);
 787
 788    value &= TARGET_PAGE_MASK;
 789    if (tlb_force_broadcast(env)) {
 790        tlb_flush_page_all_cpus_synced(cs, value);
 791    } else {
 792        tlb_flush_page(cs, value);
 793    }
 794}
 795
 796static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
 797                               uint64_t value)
 798{
 799    CPUState *cs = env_cpu(env);
 800
 801    tlb_flush_by_mmuidx(cs,
 802                        ARMMMUIdxBit_E10_1 |
 803                        ARMMMUIdxBit_E10_1_PAN |
 804                        ARMMMUIdxBit_E10_0);
 805}
 806
 807static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 808                                  uint64_t value)
 809{
 810    CPUState *cs = env_cpu(env);
 811
 812    tlb_flush_by_mmuidx_all_cpus_synced(cs,
 813                                        ARMMMUIdxBit_E10_1 |
 814                                        ARMMMUIdxBit_E10_1_PAN |
 815                                        ARMMMUIdxBit_E10_0);
 816}
 817
 818
 819static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
 820                              uint64_t value)
 821{
 822    CPUState *cs = env_cpu(env);
 823
 824    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2);
 825}
 826
 827static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 828                                 uint64_t value)
 829{
 830    CPUState *cs = env_cpu(env);
 831
 832    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2);
 833}
 834
 835static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
 836                              uint64_t value)
 837{
 838    CPUState *cs = env_cpu(env);
 839    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
 840
 841    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2);
 842}
 843
 844static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 845                                 uint64_t value)
 846{
 847    CPUState *cs = env_cpu(env);
 848    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
 849
 850    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
 851                                             ARMMMUIdxBit_E2);
 852}
 853
 854static const ARMCPRegInfo cp_reginfo[] = {
 855    /* Define the secure and non-secure FCSE identifier CP registers
 856     * separately because there is no secure bank in V8 (no _EL3).  This allows
 857     * the secure register to be properly reset and migrated. There is also no
 858     * v8 EL1 version of the register so the non-secure instance stands alone.
 859     */
 860    { .name = "FCSEIDR",
 861      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
 862      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
 863      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
 864      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
 865    { .name = "FCSEIDR_S",
 866      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
 867      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
 868      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
 869      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
 870    /* Define the secure and non-secure context identifier CP registers
 871     * separately because there is no secure bank in V8 (no _EL3).  This allows
 872     * the secure register to be properly reset and migrated.  In the
 873     * non-secure case, the 32-bit register will have reset and migration
 874     * disabled during registration as it is handled by the 64-bit instance.
 875     */
 876    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
 877      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
 878      .access = PL1_RW, .accessfn = access_tvm_trvm,
 879      .secure = ARM_CP_SECSTATE_NS,
 880      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
 881      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
 882    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
 883      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
 884      .access = PL1_RW, .accessfn = access_tvm_trvm,
 885      .secure = ARM_CP_SECSTATE_S,
 886      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
 887      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
 888    REGINFO_SENTINEL
 889};
 890
 891static const ARMCPRegInfo not_v8_cp_reginfo[] = {
 892    /* NB: Some of these registers exist in v8 but with more precise
 893     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
 894     */
 895    /* MMU Domain access control / MPU write buffer control */
 896    { .name = "DACR",
 897      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
 898      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
 899      .writefn = dacr_write, .raw_writefn = raw_write,
 900      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
 901                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
 902    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
 903     * For v6 and v5, these mappings are overly broad.
 904     */
 905    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
 906      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 907    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
 908      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 909    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
 910      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 911    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
 912      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 913    /* Cache maintenance ops; some of this space may be overridden later. */
 914    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
 915      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
 916      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
 917    REGINFO_SENTINEL
 918};
 919
 920static const ARMCPRegInfo not_v6_cp_reginfo[] = {
 921    /* Not all pre-v6 cores implemented this WFI, so this is slightly
 922     * over-broad.
 923     */
 924    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
 925      .access = PL1_W, .type = ARM_CP_WFI },
 926    REGINFO_SENTINEL
 927};
 928
 929static const ARMCPRegInfo not_v7_cp_reginfo[] = {
 930    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
 931     * is UNPREDICTABLE; we choose to NOP as most implementations do).
 932     */
 933    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
 934      .access = PL1_W, .type = ARM_CP_WFI },
 935    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
 936     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
 937     * OMAPCP will override this space.
 938     */
 939    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
 940      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
 941      .resetvalue = 0 },
 942    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
 943      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
 944      .resetvalue = 0 },
 945    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
 946    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
 947      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
 948      .resetvalue = 0 },
 949    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
 950     * implementing it as RAZ means the "debug architecture version" bits
 951     * will read as a reserved value, which should cause Linux to not try
 952     * to use the debug hardware.
 953     */
 954    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
 955      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
 956    /* MMU TLB control. Note that the wildcarding means we cover not just
 957     * the unified TLB ops but also the dside/iside/inner-shareable variants.
 958     */
 959    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
 960      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
 961      .type = ARM_CP_NO_RAW },
 962    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
 963      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
 964      .type = ARM_CP_NO_RAW },
 965    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
 966      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
 967      .type = ARM_CP_NO_RAW },
 968    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
 969      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
 970      .type = ARM_CP_NO_RAW },
 971    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
 972      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
 973    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
 974      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
 975    REGINFO_SENTINEL
 976};
 977
 978static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 979                        uint64_t value)
 980{
 981    uint32_t mask = 0;
 982
 983    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
 984    if (!arm_feature(env, ARM_FEATURE_V8)) {
 985        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
 986         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
 987         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
 988         */
 989        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
 990            /* VFP coprocessor: cp10 & cp11 [23:20] */
 991            mask |= (1 << 31) | (1 << 30) | (0xf << 20);
 992
 993            if (!arm_feature(env, ARM_FEATURE_NEON)) {
 994                /* ASEDIS [31] bit is RAO/WI */
 995                value |= (1 << 31);
 996            }
 997
 998            /* VFPv3 and upwards with NEON implement 32 double precision
 999             * registers (D0-D31).
1000             */
1001            if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
1002                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
1003                value |= (1 << 30);
1004            }
1005        }
1006        value &= mask;
1007    }
1008
1009    /*
1010     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
1011     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
1012     */
1013    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
1014        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
1015        value &= ~(0xf << 20);
1016        value |= env->cp15.cpacr_el1 & (0xf << 20);
1017    }
1018
1019    env->cp15.cpacr_el1 = value;
1020}
1021
1022static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1023{
1024    /*
1025     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
1026     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
1027     */
1028    uint64_t value = env->cp15.cpacr_el1;
1029
1030    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
1031        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
1032        value &= ~(0xf << 20);
1033    }
1034    return value;
1035}
1036
1037
1038static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
1039{
1040    /* Call cpacr_write() so that we reset with the correct RAO bits set
1041     * for our CPU features.
1042     */
1043    cpacr_write(env, ri, 0);
1044}
1045
1046static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1047                                   bool isread)
1048{
1049    if (arm_feature(env, ARM_FEATURE_V8)) {
1050        /* Check if CPACR accesses are to be trapped to EL2 */
1051        if (arm_current_el(env) == 1 &&
1052            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
1053            return CP_ACCESS_TRAP_EL2;
1054        /* Check if CPACR accesses are to be trapped to EL3 */
1055        } else if (arm_current_el(env) < 3 &&
1056                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
1057            return CP_ACCESS_TRAP_EL3;
1058        }
1059    }
1060
1061    return CP_ACCESS_OK;
1062}
1063
1064static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
1065                                  bool isread)
1066{
1067    /* Check if CPTR accesses are set to trap to EL3 */
1068    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
1069        return CP_ACCESS_TRAP_EL3;
1070    }
1071
1072    return CP_ACCESS_OK;
1073}
1074
1075static const ARMCPRegInfo v6_cp_reginfo[] = {
1076    /* prefetch by MVA in v6, NOP in v7 */
1077    { .name = "MVA_prefetch",
1078      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
1079      .access = PL1_W, .type = ARM_CP_NOP },
1080    /* We need to break the TB after ISB to execute self-modifying code
1081     * correctly and also to take any pending interrupts immediately.
1082     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
1083     */
1084    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
1085      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
1086    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
1087      .access = PL0_W, .type = ARM_CP_NOP },
1088    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
1089      .access = PL0_W, .type = ARM_CP_NOP },
1090    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
1091      .access = PL1_RW, .accessfn = access_tvm_trvm,
1092      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
1093                             offsetof(CPUARMState, cp15.ifar_ns) },
1094      .resetvalue = 0, },
1095    /* Watchpoint Fault Address Register : should actually only be present
1096     * for 1136, 1176, 11MPCore.
1097     */
1098    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
1099      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
1100    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
1101      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
1102      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
1103      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
1104    REGINFO_SENTINEL
1105};
1106
1107/* Definitions for the PMU registers */
1108#define PMCRN_MASK  0xf800
1109#define PMCRN_SHIFT 11
1110#define PMCRLC  0x40
1111#define PMCRDP  0x20
1112#define PMCRX   0x10
1113#define PMCRD   0x8
1114#define PMCRC   0x4
1115#define PMCRP   0x2
1116#define PMCRE   0x1
1117/*
1118 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
1119 * which can be written as 1 to trigger behaviour but which stay RAZ).
1120 */
1121#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
1122
1123#define PMXEVTYPER_P          0x80000000
1124#define PMXEVTYPER_U          0x40000000
1125#define PMXEVTYPER_NSK        0x20000000
1126#define PMXEVTYPER_NSU        0x10000000
1127#define PMXEVTYPER_NSH        0x08000000
1128#define PMXEVTYPER_M          0x04000000
1129#define PMXEVTYPER_MT         0x02000000
1130#define PMXEVTYPER_EVTCOUNT   0x0000ffff
1131#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1132                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1133                               PMXEVTYPER_M | PMXEVTYPER_MT | \
1134                               PMXEVTYPER_EVTCOUNT)
1135
1136#define PMCCFILTR             0xf8000000
1137#define PMCCFILTR_M           PMXEVTYPER_M
1138#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
1139
1140static inline uint32_t pmu_num_counters(CPUARMState *env)
1141{
1142  return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
1143}
1144
1145/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
1146static inline uint64_t pmu_counter_mask(CPUARMState *env)
1147{
1148  return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
1149}
1150
1151typedef struct pm_event {
1152    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
1153    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
1154    bool (*supported)(CPUARMState *);
1155    /*
1156     * Retrieve the current count of the underlying event. The programmed
1157     * counters hold a difference from the return value from this function
1158     */
1159    uint64_t (*get_count)(CPUARMState *);
1160    /*
1161     * Return how many nanoseconds it will take (at a minimum) for count events
1162     * to occur. A negative value indicates the counter will never overflow, or
1163     * that the counter has otherwise arranged for the overflow bit to be set
1164     * and the PMU interrupt to be raised on overflow.
1165     */
1166    int64_t (*ns_per_count)(uint64_t);
1167} pm_event;
1168
1169static bool event_always_supported(CPUARMState *env)
1170{
1171    return true;
1172}
1173
1174static uint64_t swinc_get_count(CPUARMState *env)
1175{
1176    /*
1177     * SW_INCR events are written directly to the pmevcntr's by writes to
1178     * PMSWINC, so there is no underlying count maintained by the PMU itself
1179     */
1180    return 0;
1181}
1182
1183static int64_t swinc_ns_per(uint64_t ignored)
1184{
1185    return -1;
1186}
1187
1188/*
1189 * Return the underlying cycle count for the PMU cycle counters. If we're in
1190 * usermode, simply return 0.
1191 */
1192static uint64_t cycles_get_count(CPUARMState *env)
1193{
1194#ifndef CONFIG_USER_ONLY
1195    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1196                   ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1197#else
1198    return cpu_get_host_ticks();
1199#endif
1200}
1201
1202#ifndef CONFIG_USER_ONLY
1203static int64_t cycles_ns_per(uint64_t cycles)
1204{
1205    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
1206}
1207
1208static bool instructions_supported(CPUARMState *env)
1209{
1210    return icount_enabled() == 1; /* Precise instruction counting */
1211}
1212
1213static uint64_t instructions_get_count(CPUARMState *env)
1214{
1215    return (uint64_t)icount_get_raw();
1216}
1217
1218static int64_t instructions_ns_per(uint64_t icount)
1219{
1220    return icount_to_ns((int64_t)icount);
1221}
1222#endif
1223
1224static bool pmu_8_1_events_supported(CPUARMState *env)
1225{
1226    /* For events which are supported in any v8.1 PMU */
1227    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
1228}
1229
1230static bool pmu_8_4_events_supported(CPUARMState *env)
1231{
1232    /* For events which are supported in any v8.1 PMU */
1233    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
1234}
1235
1236static uint64_t zero_event_get_count(CPUARMState *env)
1237{
1238    /* For events which on QEMU never fire, so their count is always zero */
1239    return 0;
1240}
1241
1242static int64_t zero_event_ns_per(uint64_t cycles)
1243{
1244    /* An event which never fires can never overflow */
1245    return -1;
1246}
1247
1248static const pm_event pm_events[] = {
1249    { .number = 0x000, /* SW_INCR */
1250      .supported = event_always_supported,
1251      .get_count = swinc_get_count,
1252      .ns_per_count = swinc_ns_per,
1253    },
1254#ifndef CONFIG_USER_ONLY
1255    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
1256      .supported = instructions_supported,
1257      .get_count = instructions_get_count,
1258      .ns_per_count = instructions_ns_per,
1259    },
1260    { .number = 0x011, /* CPU_CYCLES, Cycle */
1261      .supported = event_always_supported,
1262      .get_count = cycles_get_count,
1263      .ns_per_count = cycles_ns_per,
1264    },
1265#endif
1266    { .number = 0x023, /* STALL_FRONTEND */
1267      .supported = pmu_8_1_events_supported,
1268      .get_count = zero_event_get_count,
1269      .ns_per_count = zero_event_ns_per,
1270    },
1271    { .number = 0x024, /* STALL_BACKEND */
1272      .supported = pmu_8_1_events_supported,
1273      .get_count = zero_event_get_count,
1274      .ns_per_count = zero_event_ns_per,
1275    },
1276    { .number = 0x03c, /* STALL */
1277      .supported = pmu_8_4_events_supported,
1278      .get_count = zero_event_get_count,
1279      .ns_per_count = zero_event_ns_per,
1280    },
1281};
1282
1283/*
1284 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1285 * events (i.e. the statistical profiling extension), this implementation
1286 * should first be updated to something sparse instead of the current
1287 * supported_event_map[] array.
1288 */
1289#define MAX_EVENT_ID 0x3c
1290#define UNSUPPORTED_EVENT UINT16_MAX
1291static uint16_t supported_event_map[MAX_EVENT_ID + 1];
1292
1293/*
1294 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
1295 * of ARM event numbers to indices in our pm_events array.
1296 *
1297 * Note: Events in the 0x40XX range are not currently supported.
1298 */
1299void pmu_init(ARMCPU *cpu)
1300{
1301    unsigned int i;
1302
1303    /*
1304     * Empty supported_event_map and cpu->pmceid[01] before adding supported
1305     * events to them
1306     */
1307    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
1308        supported_event_map[i] = UNSUPPORTED_EVENT;
1309    }
1310    cpu->pmceid0 = 0;
1311    cpu->pmceid1 = 0;
1312
1313    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
1314        const pm_event *cnt = &pm_events[i];
1315        assert(cnt->number <= MAX_EVENT_ID);
1316        /* We do not currently support events in the 0x40xx range */
1317        assert(cnt->number <= 0x3f);
1318
1319        if (cnt->supported(&cpu->env)) {
1320            supported_event_map[cnt->number] = i;
1321            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
1322            if (cnt->number & 0x20) {
1323                cpu->pmceid1 |= event_mask;
1324            } else {
1325                cpu->pmceid0 |= event_mask;
1326            }
1327        }
1328    }
1329}
1330
1331/*
1332 * Check at runtime whether a PMU event is supported for the current machine
1333 */
1334static bool event_supported(uint16_t number)
1335{
1336    if (number > MAX_EVENT_ID) {
1337        return false;
1338    }
1339    return supported_event_map[number] != UNSUPPORTED_EVENT;
1340}
1341
1342static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
1343                                   bool isread)
1344{
1345    /* Performance monitor registers user accessibility is controlled
1346     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1347     * trapping to EL2 or EL3 for other accesses.
1348     */
1349    int el = arm_current_el(env);
1350
1351    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1352        return CP_ACCESS_TRAP;
1353    }
1354    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1355        && !arm_is_secure_below_el3(env)) {
1356        return CP_ACCESS_TRAP_EL2;
1357    }
1358    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1359        return CP_ACCESS_TRAP_EL3;
1360    }
1361
1362    return CP_ACCESS_OK;
1363}
1364
1365static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1366                                           const ARMCPRegInfo *ri,
1367                                           bool isread)
1368{
1369    /* ER: event counter read trap control */
1370    if (arm_feature(env, ARM_FEATURE_V8)
1371        && arm_current_el(env) == 0
1372        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1373        && isread) {
1374        return CP_ACCESS_OK;
1375    }
1376
1377    return pmreg_access(env, ri, isread);
1378}
1379
1380static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1381                                         const ARMCPRegInfo *ri,
1382                                         bool isread)
1383{
1384    /* SW: software increment write trap control */
1385    if (arm_feature(env, ARM_FEATURE_V8)
1386        && arm_current_el(env) == 0
1387        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1388        && !isread) {
1389        return CP_ACCESS_OK;
1390    }
1391
1392    return pmreg_access(env, ri, isread);
1393}
1394
1395static CPAccessResult pmreg_access_selr(CPUARMState *env,
1396                                        const ARMCPRegInfo *ri,
1397                                        bool isread)
1398{
1399    /* ER: event counter read trap control */
1400    if (arm_feature(env, ARM_FEATURE_V8)
1401        && arm_current_el(env) == 0
1402        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1403        return CP_ACCESS_OK;
1404    }
1405
1406    return pmreg_access(env, ri, isread);
1407}
1408
1409static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1410                                         const ARMCPRegInfo *ri,
1411                                         bool isread)
1412{
1413    /* CR: cycle counter read trap control */
1414    if (arm_feature(env, ARM_FEATURE_V8)
1415        && arm_current_el(env) == 0
1416        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1417        && isread) {
1418        return CP_ACCESS_OK;
1419    }
1420
1421    return pmreg_access(env, ri, isread);
1422}
1423
1424/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1425 * the current EL, security state, and register configuration.
1426 */
1427static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1428{
1429    uint64_t filter;
1430    bool e, p, u, nsk, nsu, nsh, m;
1431    bool enabled, prohibited, filtered;
1432    bool secure = arm_is_secure(env);
1433    int el = arm_current_el(env);
1434    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
1435
1436    if (!arm_feature(env, ARM_FEATURE_PMU)) {
1437        return false;
1438    }
1439
1440    if (!arm_feature(env, ARM_FEATURE_EL2) ||
1441            (counter < hpmn || counter == 31)) {
1442        e = env->cp15.c9_pmcr & PMCRE;
1443    } else {
1444        e = env->cp15.mdcr_el2 & MDCR_HPME;
1445    }
1446    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1447
1448    if (!secure) {
1449        if (el == 2 && (counter < hpmn || counter == 31)) {
1450            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
1451        } else {
1452            prohibited = false;
1453        }
1454    } else {
1455        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
1456           !(env->cp15.mdcr_el3 & MDCR_SPME);
1457    }
1458
1459    if (prohibited && counter == 31) {
1460        prohibited = env->cp15.c9_pmcr & PMCRDP;
1461    }
1462
1463    if (counter == 31) {
1464        filter = env->cp15.pmccfiltr_el0;
1465    } else {
1466        filter = env->cp15.c14_pmevtyper[counter];
1467    }
1468
1469    p   = filter & PMXEVTYPER_P;
1470    u   = filter & PMXEVTYPER_U;
1471    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
1472    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
1473    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
1474    m   = arm_el_is_aa64(env, 1) &&
1475              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
1476
1477    if (el == 0) {
1478        filtered = secure ? u : u != nsu;
1479    } else if (el == 1) {
1480        filtered = secure ? p : p != nsk;
1481    } else if (el == 2) {
1482        filtered = !nsh;
1483    } else { /* EL3 */
1484        filtered = m != p;
1485    }
1486
1487    if (counter != 31) {
1488        /*
1489         * If not checking PMCCNTR, ensure the counter is setup to an event we
1490         * support
1491         */
1492        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1493        if (!event_supported(event)) {
1494            return false;
1495        }
1496    }
1497
1498    return enabled && !prohibited && !filtered;
1499}
1500
1501static void pmu_update_irq(CPUARMState *env)
1502{
1503    ARMCPU *cpu = env_archcpu(env);
1504    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1505            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1506}
1507
1508/*
1509 * Ensure c15_ccnt is the guest-visible count so that operations such as
1510 * enabling/disabling the counter or filtering, modifying the count itself,
1511 * etc. can be done logically. This is essentially a no-op if the counter is
1512 * not enabled at the time of the call.
1513 */
1514static void pmccntr_op_start(CPUARMState *env)
1515{
1516    uint64_t cycles = cycles_get_count(env);
1517
1518    if (pmu_counter_enabled(env, 31)) {
1519        uint64_t eff_cycles = cycles;
1520        if (env->cp15.c9_pmcr & PMCRD) {
1521            /* Increment once every 64 processor clock cycles */
1522            eff_cycles /= 64;
1523        }
1524
1525        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1526
1527        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
1528                                 1ull << 63 : 1ull << 31;
1529        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1530            env->cp15.c9_pmovsr |= (1 << 31);
1531            pmu_update_irq(env);
1532        }
1533
1534        env->cp15.c15_ccnt = new_pmccntr;
1535    }
1536    env->cp15.c15_ccnt_delta = cycles;
1537}
1538
1539/*
1540 * If PMCCNTR is enabled, recalculate the delta between the clock and the
1541 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1542 * pmccntr_op_start.
1543 */
1544static void pmccntr_op_finish(CPUARMState *env)
1545{
1546    if (pmu_counter_enabled(env, 31)) {
1547#ifndef CONFIG_USER_ONLY
1548        /* Calculate when the counter will next overflow */
1549        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1550        if (!(env->cp15.c9_pmcr & PMCRLC)) {
1551            remaining_cycles = (uint32_t)remaining_cycles;
1552        }
1553        int64_t overflow_in = cycles_ns_per(remaining_cycles);
1554
1555        if (overflow_in > 0) {
1556            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1557                overflow_in;
1558            ARMCPU *cpu = env_archcpu(env);
1559            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1560        }
1561#endif
1562
1563        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1564        if (env->cp15.c9_pmcr & PMCRD) {
1565            /* Increment once every 64 processor clock cycles */
1566            prev_cycles /= 64;
1567        }
1568        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1569    }
1570}
1571
1572static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
1573{
1574
1575    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1576    uint64_t count = 0;
1577    if (event_supported(event)) {
1578        uint16_t event_idx = supported_event_map[event];
1579        count = pm_events[event_idx].get_count(env);
1580    }
1581
1582    if (pmu_counter_enabled(env, counter)) {
1583        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1584
1585        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
1586            env->cp15.c9_pmovsr |= (1 << counter);
1587            pmu_update_irq(env);
1588        }
1589        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
1590    }
1591    env->cp15.c14_pmevcntr_delta[counter] = count;
1592}
1593
1594static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
1595{
1596    if (pmu_counter_enabled(env, counter)) {
1597#ifndef CONFIG_USER_ONLY
1598        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1599        uint16_t event_idx = supported_event_map[event];
1600        uint64_t delta = UINT32_MAX -
1601            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
1602        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
1603
1604        if (overflow_in > 0) {
1605            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1606                overflow_in;
1607            ARMCPU *cpu = env_archcpu(env);
1608            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1609        }
1610#endif
1611
1612        env->cp15.c14_pmevcntr_delta[counter] -=
1613            env->cp15.c14_pmevcntr[counter];
1614    }
1615}
1616
1617void pmu_op_start(CPUARMState *env)
1618{
1619    unsigned int i;
1620    pmccntr_op_start(env);
1621    for (i = 0; i < pmu_num_counters(env); i++) {
1622        pmevcntr_op_start(env, i);
1623    }
1624}
1625
1626void pmu_op_finish(CPUARMState *env)
1627{
1628    unsigned int i;
1629    pmccntr_op_finish(env);
1630    for (i = 0; i < pmu_num_counters(env); i++) {
1631        pmevcntr_op_finish(env, i);
1632    }
1633}
1634
1635void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
1636{
1637    pmu_op_start(&cpu->env);
1638}
1639
1640void pmu_post_el_change(ARMCPU *cpu, void *ignored)
1641{
1642    pmu_op_finish(&cpu->env);
1643}
1644
1645void arm_pmu_timer_cb(void *opaque)
1646{
1647    ARMCPU *cpu = opaque;
1648
1649    /*
1650     * Update all the counter values based on the current underlying counts,
1651     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
1652     * has the effect of setting the cpu->pmu_timer to the next earliest time a
1653     * counter may expire.
1654     */
1655    pmu_op_start(&cpu->env);
1656    pmu_op_finish(&cpu->env);
1657}
1658
1659static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1660                       uint64_t value)
1661{
1662    pmu_op_start(env);
1663
1664    if (value & PMCRC) {
1665        /* The counter has been reset */
1666        env->cp15.c15_ccnt = 0;
1667    }
1668
1669    if (value & PMCRP) {
1670        unsigned int i;
1671        for (i = 0; i < pmu_num_counters(env); i++) {
1672            env->cp15.c14_pmevcntr[i] = 0;
1673        }
1674    }
1675
1676    env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK;
1677    env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK);
1678
1679    pmu_op_finish(env);
1680}
1681
1682static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
1683                          uint64_t value)
1684{
1685    unsigned int i;
1686    for (i = 0; i < pmu_num_counters(env); i++) {
1687        /* Increment a counter's count iff: */
1688        if ((value & (1 << i)) && /* counter's bit is set */
1689                /* counter is enabled and not filtered */
1690                pmu_counter_enabled(env, i) &&
1691                /* counter is SW_INCR */
1692                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1693            pmevcntr_op_start(env, i);
1694
1695            /*
1696             * Detect if this write causes an overflow since we can't predict
1697             * PMSWINC overflows like we can for other events
1698             */
1699            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1700
1701            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1702                env->cp15.c9_pmovsr |= (1 << i);
1703                pmu_update_irq(env);
1704            }
1705
1706            env->cp15.c14_pmevcntr[i] = new_pmswinc;
1707
1708            pmevcntr_op_finish(env, i);
1709        }
1710    }
1711}
1712
1713static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1714{
1715    uint64_t ret;
1716    pmccntr_op_start(env);
1717    ret = env->cp15.c15_ccnt;
1718    pmccntr_op_finish(env);
1719    return ret;
1720}
1721
1722static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1723                         uint64_t value)
1724{
1725    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1726     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
1727     * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
1728     * accessed.
1729     */
1730    env->cp15.c9_pmselr = value & 0x1f;
1731}
1732
1733static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1734                        uint64_t value)
1735{
1736    pmccntr_op_start(env);
1737    env->cp15.c15_ccnt = value;
1738    pmccntr_op_finish(env);
1739}
1740
1741static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1742                            uint64_t value)
1743{
1744    uint64_t cur_val = pmccntr_read(env, NULL);
1745
1746    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1747}
1748
1749static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1750                            uint64_t value)
1751{
1752    pmccntr_op_start(env);
1753    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1754    pmccntr_op_finish(env);
1755}
1756
1757static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1758                            uint64_t value)
1759{
1760    pmccntr_op_start(env);
1761    /* M is not accessible from AArch32 */
1762    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1763        (value & PMCCFILTR);
1764    pmccntr_op_finish(env);
1765}
1766
1767static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1768{
1769    /* M is not visible in AArch32 */
1770    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1771}
1772
1773static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1774                            uint64_t value)
1775{
1776    value &= pmu_counter_mask(env);
1777    env->cp15.c9_pmcnten |= value;
1778}
1779
1780static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1781                             uint64_t value)
1782{
1783    value &= pmu_counter_mask(env);
1784    env->cp15.c9_pmcnten &= ~value;
1785}
1786
1787static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1788                         uint64_t value)
1789{
1790    value &= pmu_counter_mask(env);
1791    env->cp15.c9_pmovsr &= ~value;
1792    pmu_update_irq(env);
1793}
1794
1795static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1796                         uint64_t value)
1797{
1798    value &= pmu_counter_mask(env);
1799    env->cp15.c9_pmovsr |= value;
1800    pmu_update_irq(env);
1801}
1802
1803static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1804                             uint64_t value, const uint8_t counter)
1805{
1806    if (counter == 31) {
1807        pmccfiltr_write(env, ri, value);
1808    } else if (counter < pmu_num_counters(env)) {
1809        pmevcntr_op_start(env, counter);
1810
1811        /*
1812         * If this counter's event type is changing, store the current
1813         * underlying count for the new type in c14_pmevcntr_delta[counter] so
1814         * pmevcntr_op_finish has the correct baseline when it converts back to
1815         * a delta.
1816         */
1817        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1818            PMXEVTYPER_EVTCOUNT;
1819        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1820        if (old_event != new_event) {
1821            uint64_t count = 0;
1822            if (event_supported(new_event)) {
1823                uint16_t event_idx = supported_event_map[new_event];
1824                count = pm_events[event_idx].get_count(env);
1825            }
1826            env->cp15.c14_pmevcntr_delta[counter] = count;
1827        }
1828
1829        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1830        pmevcntr_op_finish(env, counter);
1831    }
1832    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1833     * PMSELR value is equal to or greater than the number of implemented
1834     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1835     */
1836}
1837
1838static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1839                               const uint8_t counter)
1840{
1841    if (counter == 31) {
1842        return env->cp15.pmccfiltr_el0;
1843    } else if (counter < pmu_num_counters(env)) {
1844        return env->cp15.c14_pmevtyper[counter];
1845    } else {
1846      /*
1847       * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1848       * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1849       */
1850        return 0;
1851    }
1852}
1853
1854static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1855                              uint64_t value)
1856{
1857    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1858    pmevtyper_write(env, ri, value, counter);
1859}
1860
1861static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1862                               uint64_t value)
1863{
1864    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1865    env->cp15.c14_pmevtyper[counter] = value;
1866
1867    /*
1868     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1869     * pmu_op_finish calls when loading saved state for a migration. Because
1870     * we're potentially updating the type of event here, the value written to
1871     * c14_pmevcntr_delta by the preceeding pmu_op_start call may be for a
1872     * different counter type. Therefore, we need to set this value to the
1873     * current count for the counter type we're writing so that pmu_op_finish
1874     * has the correct count for its calculation.
1875     */
1876    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1877    if (event_supported(event)) {
1878        uint16_t event_idx = supported_event_map[event];
1879        env->cp15.c14_pmevcntr_delta[counter] =
1880            pm_events[event_idx].get_count(env);
1881    }
1882}
1883
1884static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1885{
1886    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1887    return pmevtyper_read(env, ri, counter);
1888}
1889
1890static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1891                             uint64_t value)
1892{
1893    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1894}
1895
1896static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1897{
1898    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1899}
1900
1901static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1902                             uint64_t value, uint8_t counter)
1903{
1904    if (counter < pmu_num_counters(env)) {
1905        pmevcntr_op_start(env, counter);
1906        env->cp15.c14_pmevcntr[counter] = value;
1907        pmevcntr_op_finish(env, counter);
1908    }
1909    /*
1910     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1911     * are CONSTRAINED UNPREDICTABLE.
1912     */
1913}
1914
1915static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1916                              uint8_t counter)
1917{
1918    if (counter < pmu_num_counters(env)) {
1919        uint64_t ret;
1920        pmevcntr_op_start(env, counter);
1921        ret = env->cp15.c14_pmevcntr[counter];
1922        pmevcntr_op_finish(env, counter);
1923        return ret;
1924    } else {
1925      /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1926       * are CONSTRAINED UNPREDICTABLE. */
1927        return 0;
1928    }
1929}
1930
1931static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1932                             uint64_t value)
1933{
1934    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1935    pmevcntr_write(env, ri, value, counter);
1936}
1937
1938static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1939{
1940    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1941    return pmevcntr_read(env, ri, counter);
1942}
1943
1944static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1945                             uint64_t value)
1946{
1947    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1948    assert(counter < pmu_num_counters(env));
1949    env->cp15.c14_pmevcntr[counter] = value;
1950    pmevcntr_write(env, ri, value, counter);
1951}
1952
1953static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1954{
1955    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1956    assert(counter < pmu_num_counters(env));
1957    return env->cp15.c14_pmevcntr[counter];
1958}
1959
1960static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1961                             uint64_t value)
1962{
1963    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1964}
1965
1966static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1967{
1968    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1969}
1970
1971static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1972                            uint64_t value)
1973{
1974    if (arm_feature(env, ARM_FEATURE_V8)) {
1975        env->cp15.c9_pmuserenr = value & 0xf;
1976    } else {
1977        env->cp15.c9_pmuserenr = value & 1;
1978    }
1979}
1980
1981static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1982                             uint64_t value)
1983{
1984    /* We have no event counters so only the C bit can be changed */
1985    value &= pmu_counter_mask(env);
1986    env->cp15.c9_pminten |= value;
1987    pmu_update_irq(env);
1988}
1989
1990static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1991                             uint64_t value)
1992{
1993    value &= pmu_counter_mask(env);
1994    env->cp15.c9_pminten &= ~value;
1995    pmu_update_irq(env);
1996}
1997
1998static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1999                       uint64_t value)
2000{
2001    /* Note that even though the AArch64 view of this register has bits
2002     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
2003     * architectural requirements for bits which are RES0 only in some
2004     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
2005     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
2006     */
2007    raw_write(env, ri, value & ~0x1FULL);
2008}
2009
2010static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2011{
2012    /* Begin with base v8.0 state.  */
2013    uint32_t valid_mask = 0x3fff;
2014    ARMCPU *cpu = env_archcpu(env);
2015
2016    if (ri->state == ARM_CP_STATE_AA64) {
2017        value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
2018        valid_mask &= ~SCR_NET;
2019
2020        if (cpu_isar_feature(aa64_lor, cpu)) {
2021            valid_mask |= SCR_TLOR;
2022        }
2023        if (cpu_isar_feature(aa64_pauth, cpu)) {
2024            valid_mask |= SCR_API | SCR_APK;
2025        }
2026        if (cpu_isar_feature(aa64_mte, cpu)) {
2027            valid_mask |= SCR_ATA;
2028        }
2029    } else {
2030        valid_mask &= ~(SCR_RW | SCR_ST);
2031    }
2032
2033    if (!arm_feature(env, ARM_FEATURE_EL2)) {
2034        valid_mask &= ~SCR_HCE;
2035
2036        /* On ARMv7, SMD (or SCD as it is called in v7) is only
2037         * supported if EL2 exists. The bit is UNK/SBZP when
2038         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
2039         * when EL2 is unavailable.
2040         * On ARMv8, this bit is always available.
2041         */
2042        if (arm_feature(env, ARM_FEATURE_V7) &&
2043            !arm_feature(env, ARM_FEATURE_V8)) {
2044            valid_mask &= ~SCR_SMD;
2045        }
2046    }
2047
2048    /* Clear all-context RES0 bits.  */
2049    value &= valid_mask;
2050    raw_write(env, ri, value);
2051}
2052
2053static CPAccessResult access_aa64_tid2(CPUARMState *env,
2054                                       const ARMCPRegInfo *ri,
2055                                       bool isread)
2056{
2057    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
2058        return CP_ACCESS_TRAP_EL2;
2059    }
2060
2061    return CP_ACCESS_OK;
2062}
2063
2064static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2065{
2066    ARMCPU *cpu = env_archcpu(env);
2067
2068    /* Acquire the CSSELR index from the bank corresponding to the CCSIDR
2069     * bank
2070     */
2071    uint32_t index = A32_BANKED_REG_GET(env, csselr,
2072                                        ri->secure & ARM_CP_SECSTATE_S);
2073
2074    return cpu->ccsidr[index];
2075}
2076
2077static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2078                         uint64_t value)
2079{
2080    raw_write(env, ri, value & 0xf);
2081}
2082
2083static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
2084{
2085    CPUState *cs = env_cpu(env);
2086    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
2087    uint64_t ret = 0;
2088    bool allow_virt = (arm_current_el(env) == 1 &&
2089                       (!arm_is_secure_below_el3(env) ||
2090                        (env->cp15.scr_el3 & SCR_EEL2)));
2091
2092    if (allow_virt && (hcr_el2 & HCR_IMO)) {
2093        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
2094            ret |= CPSR_I;
2095        }
2096    } else {
2097        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
2098            ret |= CPSR_I;
2099        }
2100    }
2101
2102    if (allow_virt && (hcr_el2 & HCR_FMO)) {
2103        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
2104            ret |= CPSR_F;
2105        }
2106    } else {
2107        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
2108            ret |= CPSR_F;
2109        }
2110    }
2111
2112    /* External aborts are not possible in QEMU so A bit is always clear */
2113    return ret;
2114}
2115
2116static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2117                                       bool isread)
2118{
2119    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
2120        return CP_ACCESS_TRAP_EL2;
2121    }
2122
2123    return CP_ACCESS_OK;
2124}
2125
2126static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
2127                                       bool isread)
2128{
2129    if (arm_feature(env, ARM_FEATURE_V8)) {
2130        return access_aa64_tid1(env, ri, isread);
2131    }
2132
2133    return CP_ACCESS_OK;
2134}
2135
2136static const ARMCPRegInfo v7_cp_reginfo[] = {
2137    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
2138    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
2139      .access = PL1_W, .type = ARM_CP_NOP },
2140    /* Performance monitors are implementation defined in v7,
2141     * but with an ARM recommended set of registers, which we
2142     * follow.
2143     *
2144     * Performance registers fall into three categories:
2145     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
2146     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
2147     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
2148     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
2149     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
2150     */
2151    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
2152      .access = PL0_RW, .type = ARM_CP_ALIAS,
2153      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2154      .writefn = pmcntenset_write,
2155      .accessfn = pmreg_access,
2156      .raw_writefn = raw_write },
2157    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
2158      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
2159      .access = PL0_RW, .accessfn = pmreg_access,
2160      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
2161      .writefn = pmcntenset_write, .raw_writefn = raw_write },
2162    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
2163      .access = PL0_RW,
2164      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
2165      .accessfn = pmreg_access,
2166      .writefn = pmcntenclr_write,
2167      .type = ARM_CP_ALIAS },
2168    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
2169      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
2170      .access = PL0_RW, .accessfn = pmreg_access,
2171      .type = ARM_CP_ALIAS,
2172      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
2173      .writefn = pmcntenclr_write },
2174    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
2175      .access = PL0_RW, .type = ARM_CP_IO,
2176      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2177      .accessfn = pmreg_access,
2178      .writefn = pmovsr_write,
2179      .raw_writefn = raw_write },
2180    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2181      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2182      .access = PL0_RW, .accessfn = pmreg_access,
2183      .type = ARM_CP_ALIAS | ARM_CP_IO,
2184      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2185      .writefn = pmovsr_write,
2186      .raw_writefn = raw_write },
2187    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2188      .access = PL0_W, .accessfn = pmreg_access_swinc,
2189      .type = ARM_CP_NO_RAW | ARM_CP_IO,
2190      .writefn = pmswinc_write },
2191    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2192      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2193      .access = PL0_W, .accessfn = pmreg_access_swinc,
2194      .type = ARM_CP_NO_RAW | ARM_CP_IO,
2195      .writefn = pmswinc_write },
2196    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2197      .access = PL0_RW, .type = ARM_CP_ALIAS,
2198      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2199      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2200      .raw_writefn = raw_write},
2201    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2202      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2203      .access = PL0_RW, .accessfn = pmreg_access_selr,
2204      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2205      .writefn = pmselr_write, .raw_writefn = raw_write, },
2206    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2207      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2208      .readfn = pmccntr_read, .writefn = pmccntr_write32,
2209      .accessfn = pmreg_access_ccntr },
2210    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2211      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2212      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2213      .type = ARM_CP_IO,
2214      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2215      .readfn = pmccntr_read, .writefn = pmccntr_write,
2216      .raw_readfn = raw_read, .raw_writefn = raw_write, },
2217    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2218      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2219      .access = PL0_RW, .accessfn = pmreg_access,
2220      .type = ARM_CP_ALIAS | ARM_CP_IO,
2221      .resetvalue = 0, },
2222    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2223      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2224      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2225      .access = PL0_RW, .accessfn = pmreg_access,
2226      .type = ARM_CP_IO,
2227      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2228      .resetvalue = 0, },
2229    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2230      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2231      .accessfn = pmreg_access,
2232      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2233    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2234      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2235      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2236      .accessfn = pmreg_access,
2237      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2238    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2239      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2240      .accessfn = pmreg_access_xevcntr,
2241      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2242    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2243      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2244      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2245      .accessfn = pmreg_access_xevcntr,
2246      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2247    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2248      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2249      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2250      .resetvalue = 0,
2251      .writefn = pmuserenr_write, .raw_writefn = raw_write },
2252    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2253      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2254      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2255      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2256      .resetvalue = 0,
2257      .writefn = pmuserenr_write, .raw_writefn = raw_write },
2258    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2259      .access = PL1_RW, .accessfn = access_tpm,
2260      .type = ARM_CP_ALIAS | ARM_CP_IO,
2261      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2262      .resetvalue = 0,
2263      .writefn = pmintenset_write, .raw_writefn = raw_write },
2264    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2265      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2266      .access = PL1_RW, .accessfn = access_tpm,
2267      .type = ARM_CP_IO,
2268      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2269      .writefn = pmintenset_write, .raw_writefn = raw_write,
2270      .resetvalue = 0x0 },
2271    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2272      .access = PL1_RW, .accessfn = access_tpm,
2273      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2274      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2275      .writefn = pmintenclr_write, },
2276    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2277      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2278      .access = PL1_RW, .accessfn = access_tpm,
2279      .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
2280      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2281      .writefn = pmintenclr_write },
2282    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2283      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2284      .access = PL1_R,
2285      .accessfn = access_aa64_tid2,
2286      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2287    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2288      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2289      .access = PL1_RW,
2290      .accessfn = access_aa64_tid2,
2291      .writefn = csselr_write, .resetvalue = 0,
2292      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2293                             offsetof(CPUARMState, cp15.csselr_ns) } },
2294    /* Auxiliary ID register: this actually has an IMPDEF value but for now
2295     * just RAZ for all cores:
2296     */
2297    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2298      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2299      .access = PL1_R, .type = ARM_CP_CONST,
2300      .accessfn = access_aa64_tid1,
2301      .resetvalue = 0 },
2302    /* Auxiliary fault status registers: these also are IMPDEF, and we
2303     * choose to RAZ/WI for all cores.
2304     */
2305    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2306      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2307      .access = PL1_RW, .accessfn = access_tvm_trvm,
2308      .type = ARM_CP_CONST, .resetvalue = 0 },
2309    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2310      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2311      .access = PL1_RW, .accessfn = access_tvm_trvm,
2312      .type = ARM_CP_CONST, .resetvalue = 0 },
2313    /* MAIR can just read-as-written because we don't implement caches
2314     * and so don't need to care about memory attributes.
2315     */
2316    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2317      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2318      .access = PL1_RW, .accessfn = access_tvm_trvm,
2319      .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2320      .resetvalue = 0 },
2321    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2322      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2323      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2324      .resetvalue = 0 },
2325    /* For non-long-descriptor page tables these are PRRR and NMRR;
2326     * regardless they still act as reads-as-written for QEMU.
2327     */
2328     /* MAIR0/1 are defined separately from their 64-bit counterpart which
2329      * allows them to assign the correct fieldoffset based on the endianness
2330      * handled in the field definitions.
2331      */
2332    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2333      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2334      .access = PL1_RW, .accessfn = access_tvm_trvm,
2335      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2336                             offsetof(CPUARMState, cp15.mair0_ns) },
2337      .resetfn = arm_cp_reset_ignore },
2338    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2339      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1,
2340      .access = PL1_RW, .accessfn = access_tvm_trvm,
2341      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2342                             offsetof(CPUARMState, cp15.mair1_ns) },
2343      .resetfn = arm_cp_reset_ignore },
2344    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2345      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2346      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2347    /* 32 bit ITLB invalidates */
2348    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2349      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2350      .writefn = tlbiall_write },
2351    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2352      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2353      .writefn = tlbimva_write },
2354    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2355      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2356      .writefn = tlbiasid_write },
2357    /* 32 bit DTLB invalidates */
2358    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2359      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2360      .writefn = tlbiall_write },
2361    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2362      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2363      .writefn = tlbimva_write },
2364    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2365      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2366      .writefn = tlbiasid_write },
2367    /* 32 bit TLB invalidates */
2368    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2369      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2370      .writefn = tlbiall_write },
2371    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2372      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2373      .writefn = tlbimva_write },
2374    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2375      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2376      .writefn = tlbiasid_write },
2377    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2378      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2379      .writefn = tlbimvaa_write },
2380    REGINFO_SENTINEL
2381};
2382
2383static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2384    /* 32 bit TLB invalidates, Inner Shareable */
2385    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2386      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2387      .writefn = tlbiall_is_write },
2388    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2389      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2390      .writefn = tlbimva_is_write },
2391    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2392      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2393      .writefn = tlbiasid_is_write },
2394    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2395      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
2396      .writefn = tlbimvaa_is_write },
2397    REGINFO_SENTINEL
2398};
2399
2400static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2401    /* PMOVSSET is not implemented in v7 before v7ve */
2402    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2403      .access = PL0_RW, .accessfn = pmreg_access,
2404      .type = ARM_CP_ALIAS | ARM_CP_IO,
2405      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2406      .writefn = pmovsset_write,
2407      .raw_writefn = raw_write },
2408    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2409      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2410      .access = PL0_RW, .accessfn = pmreg_access,
2411      .type = ARM_CP_ALIAS | ARM_CP_IO,
2412      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2413      .writefn = pmovsset_write,
2414      .raw_writefn = raw_write },
2415    REGINFO_SENTINEL
2416};
2417
2418static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2419                        uint64_t value)
2420{
2421    value &= 1;
2422    env->teecr = value;
2423}
2424
2425static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2426                                    bool isread)
2427{
2428    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2429        return CP_ACCESS_TRAP;
2430    }
2431    return CP_ACCESS_OK;
2432}
2433
2434static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2435    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2436      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2437      .resetvalue = 0,
2438      .writefn = teecr_write },
2439    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2440      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2441      .accessfn = teehbr_access, .resetvalue = 0 },
2442    REGINFO_SENTINEL
2443};
2444
2445static const ARMCPRegInfo v6k_cp_reginfo[] = {
2446    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2447      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2448      .access = PL0_RW,
2449      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2450    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2451      .access = PL0_RW,
2452      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2453                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2454      .resetfn = arm_cp_reset_ignore },
2455    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2456      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2457      .access = PL0_R|PL1_W,
2458      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2459      .resetvalue = 0},
2460    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2461      .access = PL0_R|PL1_W,
2462      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2463                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2464      .resetfn = arm_cp_reset_ignore },
2465    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2466      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2467      .access = PL1_RW,
2468      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2469    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2470      .access = PL1_RW,
2471      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2472                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2473      .resetvalue = 0 },
2474    REGINFO_SENTINEL
2475};
2476
2477#ifndef CONFIG_USER_ONLY
2478
2479static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2480                                       bool isread)
2481{
2482    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2483     * Writable only at the highest implemented exception level.
2484     */
2485    int el = arm_current_el(env);
2486    uint64_t hcr;
2487    uint32_t cntkctl;
2488
2489    switch (el) {
2490    case 0:
2491        hcr = arm_hcr_el2_eff(env);
2492        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2493            cntkctl = env->cp15.cnthctl_el2;
2494        } else {
2495            cntkctl = env->cp15.c14_cntkctl;
2496        }
2497        if (!extract32(cntkctl, 0, 2)) {
2498            return CP_ACCESS_TRAP;
2499        }
2500        break;
2501    case 1:
2502        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2503            arm_is_secure_below_el3(env)) {
2504            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2505            return CP_ACCESS_TRAP_UNCATEGORIZED;
2506        }
2507        break;
2508    case 2:
2509    case 3:
2510        break;
2511    }
2512
2513    if (!isread && el < arm_highest_el(env)) {
2514        return CP_ACCESS_TRAP_UNCATEGORIZED;
2515    }
2516
2517    return CP_ACCESS_OK;
2518}
2519
2520static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2521                                        bool isread)
2522{
2523    unsigned int cur_el = arm_current_el(env);
2524    bool secure = arm_is_secure(env);
2525    uint64_t hcr = arm_hcr_el2_eff(env);
2526
2527    switch (cur_el) {
2528    case 0:
2529        /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */
2530        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2531            return (extract32(env->cp15.cnthctl_el2, timeridx, 1)
2532                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2533        }
2534
2535        /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2536        if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2537            return CP_ACCESS_TRAP;
2538        }
2539
2540        /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */
2541        if (hcr & HCR_E2H) {
2542            if (timeridx == GTIMER_PHYS &&
2543                !extract32(env->cp15.cnthctl_el2, 10, 1)) {
2544                return CP_ACCESS_TRAP_EL2;
2545            }
2546        } else {
2547            /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2548            if (arm_feature(env, ARM_FEATURE_EL2) &&
2549                timeridx == GTIMER_PHYS && !secure &&
2550                !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2551                return CP_ACCESS_TRAP_EL2;
2552            }
2553        }
2554        break;
2555
2556    case 1:
2557        /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */
2558        if (arm_feature(env, ARM_FEATURE_EL2) &&
2559            timeridx == GTIMER_PHYS && !secure &&
2560            (hcr & HCR_E2H
2561             ? !extract32(env->cp15.cnthctl_el2, 10, 1)
2562             : !extract32(env->cp15.cnthctl_el2, 0, 1))) {
2563            return CP_ACCESS_TRAP_EL2;
2564        }
2565        break;
2566    }
2567    return CP_ACCESS_OK;
2568}
2569
2570static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2571                                      bool isread)
2572{
2573    unsigned int cur_el = arm_current_el(env);
2574    bool secure = arm_is_secure(env);
2575    uint64_t hcr = arm_hcr_el2_eff(env);
2576
2577    switch (cur_el) {
2578    case 0:
2579        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2580            /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */
2581            return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1)
2582                    ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2);
2583        }
2584
2585        /*
2586         * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from
2587         * EL0 if EL0[PV]TEN is zero.
2588         */
2589        if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2590            return CP_ACCESS_TRAP;
2591        }
2592        /* fall through */
2593
2594    case 1:
2595        if (arm_feature(env, ARM_FEATURE_EL2) &&
2596            timeridx == GTIMER_PHYS && !secure) {
2597            if (hcr & HCR_E2H) {
2598                /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */
2599                if (!extract32(env->cp15.cnthctl_el2, 11, 1)) {
2600                    return CP_ACCESS_TRAP_EL2;
2601                }
2602            } else {
2603                /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */
2604                if (!extract32(env->cp15.cnthctl_el2, 1, 1)) {
2605                    return CP_ACCESS_TRAP_EL2;
2606                }
2607            }
2608        }
2609        break;
2610    }
2611    return CP_ACCESS_OK;
2612}
2613
2614static CPAccessResult gt_pct_access(CPUARMState *env,
2615                                    const ARMCPRegInfo *ri,
2616                                    bool isread)
2617{
2618    return gt_counter_access(env, GTIMER_PHYS, isread);
2619}
2620
2621static CPAccessResult gt_vct_access(CPUARMState *env,
2622                                    const ARMCPRegInfo *ri,
2623                                    bool isread)
2624{
2625    return gt_counter_access(env, GTIMER_VIRT, isread);
2626}
2627
2628static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2629                                       bool isread)
2630{
2631    return gt_timer_access(env, GTIMER_PHYS, isread);
2632}
2633
2634static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2635                                       bool isread)
2636{
2637    return gt_timer_access(env, GTIMER_VIRT, isread);
2638}
2639
2640static CPAccessResult gt_stimer_access(CPUARMState *env,
2641                                       const ARMCPRegInfo *ri,
2642                                       bool isread)
2643{
2644    /* The AArch64 register view of the secure physical timer is
2645     * always accessible from EL3, and configurably accessible from
2646     * Secure EL1.
2647     */
2648    switch (arm_current_el(env)) {
2649    case 1:
2650        if (!arm_is_secure(env)) {
2651            return CP_ACCESS_TRAP;
2652        }
2653        if (!(env->cp15.scr_el3 & SCR_ST)) {
2654            return CP_ACCESS_TRAP_EL3;
2655        }
2656        return CP_ACCESS_OK;
2657    case 0:
2658    case 2:
2659        return CP_ACCESS_TRAP;
2660    case 3:
2661        return CP_ACCESS_OK;
2662    default:
2663        g_assert_not_reached();
2664    }
2665}
2666
2667static uint64_t gt_get_countervalue(CPUARMState *env)
2668{
2669    ARMCPU *cpu = env_archcpu(env);
2670
2671    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu);
2672}
2673
2674static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2675{
2676    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2677
2678    if (gt->ctl & 1) {
2679        /* Timer enabled: calculate and set current ISTATUS, irq, and
2680         * reset timer to when ISTATUS next has to change
2681         */
2682        uint64_t offset = timeridx == GTIMER_VIRT ?
2683                                      cpu->env.cp15.cntvoff_el2 : 0;
2684        uint64_t count = gt_get_countervalue(&cpu->env);
2685        /* Note that this must be unsigned 64 bit arithmetic: */
2686        int istatus = count - offset >= gt->cval;
2687        uint64_t nexttick;
2688        int irqstate;
2689
2690        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2691
2692        irqstate = (istatus && !(gt->ctl & 2));
2693        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2694
2695        if (istatus) {
2696            /* Next transition is when count rolls back over to zero */
2697            nexttick = UINT64_MAX;
2698        } else {
2699            /* Next transition is when we hit cval */
2700            nexttick = gt->cval + offset;
2701        }
2702        /* Note that the desired next expiry time might be beyond the
2703         * signed-64-bit range of a QEMUTimer -- in this case we just
2704         * set the timer for as far in the future as possible. When the
2705         * timer expires we will reset the timer for any remaining period.
2706         */
2707        if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
2708            timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX);
2709        } else {
2710            timer_mod(cpu->gt_timer[timeridx], nexttick);
2711        }
2712        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2713    } else {
2714        /* Timer disabled: ISTATUS and timer output always clear */
2715        gt->ctl &= ~4;
2716        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2717        timer_del(cpu->gt_timer[timeridx]);
2718        trace_arm_gt_recalc_disabled(timeridx);
2719    }
2720}
2721
2722static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2723                           int timeridx)
2724{
2725    ARMCPU *cpu = env_archcpu(env);
2726
2727    timer_del(cpu->gt_timer[timeridx]);
2728}
2729
2730static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2731{
2732    return gt_get_countervalue(env);
2733}
2734
2735static uint64_t gt_virt_cnt_offset(CPUARMState *env)
2736{
2737    uint64_t hcr;
2738
2739    switch (arm_current_el(env)) {
2740    case 2:
2741        hcr = arm_hcr_el2_eff(env);
2742        if (hcr & HCR_E2H) {
2743            return 0;
2744        }
2745        break;
2746    case 0:
2747        hcr = arm_hcr_el2_eff(env);
2748        if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
2749            return 0;
2750        }
2751        break;
2752    }
2753
2754    return env->cp15.cntvoff_el2;
2755}
2756
2757static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2758{
2759    return gt_get_countervalue(env) - gt_virt_cnt_offset(env);
2760}
2761
2762static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2763                          int timeridx,
2764                          uint64_t value)
2765{
2766    trace_arm_gt_cval_write(timeridx, value);
2767    env->cp15.c14_timer[timeridx].cval = value;
2768    gt_recalc_timer(env_archcpu(env), timeridx);
2769}
2770
2771static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2772                             int timeridx)
2773{
2774    uint64_t offset = 0;
2775
2776    switch (timeridx) {
2777    case GTIMER_VIRT:
2778    case GTIMER_HYPVIRT:
2779        offset = gt_virt_cnt_offset(env);
2780        break;
2781    }
2782
2783    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2784                      (gt_get_countervalue(env) - offset));
2785}
2786
2787static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2788                          int timeridx,
2789                          uint64_t value)
2790{
2791    uint64_t offset = 0;
2792
2793    switch (timeridx) {
2794    case GTIMER_VIRT:
2795    case GTIMER_HYPVIRT:
2796        offset = gt_virt_cnt_offset(env);
2797        break;
2798    }
2799
2800    trace_arm_gt_tval_write(timeridx, value);
2801    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2802                                         sextract64(value, 0, 32);
2803    gt_recalc_timer(env_archcpu(env), timeridx);
2804}
2805
2806static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2807                         int timeridx,
2808                         uint64_t value)
2809{
2810    ARMCPU *cpu = env_archcpu(env);
2811    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2812
2813    trace_arm_gt_ctl_write(timeridx, value);
2814    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2815    if ((oldval ^ value) & 1) {
2816        /* Enable toggled */
2817        gt_recalc_timer(cpu, timeridx);
2818    } else if ((oldval ^ value) & 2) {
2819        /* IMASK toggled: don't need to recalculate,
2820         * just set the interrupt line based on ISTATUS
2821         */
2822        int irqstate = (oldval & 4) && !(value & 2);
2823
2824        trace_arm_gt_imask_toggle(timeridx, irqstate);
2825        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2826    }
2827}
2828
2829static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2830{
2831    gt_timer_reset(env, ri, GTIMER_PHYS);
2832}
2833
2834static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2835                               uint64_t value)
2836{
2837    gt_cval_write(env, ri, GTIMER_PHYS, value);
2838}
2839
2840static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2841{
2842    return gt_tval_read(env, ri, GTIMER_PHYS);
2843}
2844
2845static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2846                               uint64_t value)
2847{
2848    gt_tval_write(env, ri, GTIMER_PHYS, value);
2849}
2850
2851static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2852                              uint64_t value)
2853{
2854    gt_ctl_write(env, ri, GTIMER_PHYS, value);
2855}
2856
2857static int gt_phys_redir_timeridx(CPUARMState *env)
2858{
2859    switch (arm_mmu_idx(env)) {
2860    case ARMMMUIdx_E20_0:
2861    case ARMMMUIdx_E20_2:
2862    case ARMMMUIdx_E20_2_PAN:
2863        return GTIMER_HYP;
2864    default:
2865        return GTIMER_PHYS;
2866    }
2867}
2868
2869static int gt_virt_redir_timeridx(CPUARMState *env)
2870{
2871    switch (arm_mmu_idx(env)) {
2872    case ARMMMUIdx_E20_0:
2873    case ARMMMUIdx_E20_2:
2874    case ARMMMUIdx_E20_2_PAN:
2875        return GTIMER_HYPVIRT;
2876    default:
2877        return GTIMER_VIRT;
2878    }
2879}
2880
2881static uint64_t gt_phys_redir_cval_read(CPUARMState *env,
2882                                        const ARMCPRegInfo *ri)
2883{
2884    int timeridx = gt_phys_redir_timeridx(env);
2885    return env->cp15.c14_timer[timeridx].cval;
2886}
2887
2888static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2889                                     uint64_t value)
2890{
2891    int timeridx = gt_phys_redir_timeridx(env);
2892    gt_cval_write(env, ri, timeridx, value);
2893}
2894
2895static uint64_t gt_phys_redir_tval_read(CPUARMState *env,
2896                                        const ARMCPRegInfo *ri)
2897{
2898    int timeridx = gt_phys_redir_timeridx(env);
2899    return gt_tval_read(env, ri, timeridx);
2900}
2901
2902static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2903                                     uint64_t value)
2904{
2905    int timeridx = gt_phys_redir_timeridx(env);
2906    gt_tval_write(env, ri, timeridx, value);
2907}
2908
2909static uint64_t gt_phys_redir_ctl_read(CPUARMState *env,
2910                                       const ARMCPRegInfo *ri)
2911{
2912    int timeridx = gt_phys_redir_timeridx(env);
2913    return env->cp15.c14_timer[timeridx].ctl;
2914}
2915
2916static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2917                                    uint64_t value)
2918{
2919    int timeridx = gt_phys_redir_timeridx(env);
2920    gt_ctl_write(env, ri, timeridx, value);
2921}
2922
2923static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2924{
2925    gt_timer_reset(env, ri, GTIMER_VIRT);
2926}
2927
2928static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2929                               uint64_t value)
2930{
2931    gt_cval_write(env, ri, GTIMER_VIRT, value);
2932}
2933
2934static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2935{
2936    return gt_tval_read(env, ri, GTIMER_VIRT);
2937}
2938
2939static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2940                               uint64_t value)
2941{
2942    gt_tval_write(env, ri, GTIMER_VIRT, value);
2943}
2944
2945static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2946                              uint64_t value)
2947{
2948    gt_ctl_write(env, ri, GTIMER_VIRT, value);
2949}
2950
2951static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2952                              uint64_t value)
2953{
2954    ARMCPU *cpu = env_archcpu(env);
2955
2956    trace_arm_gt_cntvoff_write(value);
2957    raw_write(env, ri, value);
2958    gt_recalc_timer(cpu, GTIMER_VIRT);
2959}
2960
2961static uint64_t gt_virt_redir_cval_read(CPUARMState *env,
2962                                        const ARMCPRegInfo *ri)
2963{
2964    int timeridx = gt_virt_redir_timeridx(env);
2965    return env->cp15.c14_timer[timeridx].cval;
2966}
2967
2968static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2969                                     uint64_t value)
2970{
2971    int timeridx = gt_virt_redir_timeridx(env);
2972    gt_cval_write(env, ri, timeridx, value);
2973}
2974
2975static uint64_t gt_virt_redir_tval_read(CPUARMState *env,
2976                                        const ARMCPRegInfo *ri)
2977{
2978    int timeridx = gt_virt_redir_timeridx(env);
2979    return gt_tval_read(env, ri, timeridx);
2980}
2981
2982static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2983                                     uint64_t value)
2984{
2985    int timeridx = gt_virt_redir_timeridx(env);
2986    gt_tval_write(env, ri, timeridx, value);
2987}
2988
2989static uint64_t gt_virt_redir_ctl_read(CPUARMState *env,
2990                                       const ARMCPRegInfo *ri)
2991{
2992    int timeridx = gt_virt_redir_timeridx(env);
2993    return env->cp15.c14_timer[timeridx].ctl;
2994}
2995
2996static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2997                                    uint64_t value)
2998{
2999    int timeridx = gt_virt_redir_timeridx(env);
3000    gt_ctl_write(env, ri, timeridx, value);
3001}
3002
3003static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3004{
3005    gt_timer_reset(env, ri, GTIMER_HYP);
3006}
3007
3008static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3009                              uint64_t value)
3010{
3011    gt_cval_write(env, ri, GTIMER_HYP, value);
3012}
3013
3014static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3015{
3016    return gt_tval_read(env, ri, GTIMER_HYP);
3017}
3018
3019static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3020                              uint64_t value)
3021{
3022    gt_tval_write(env, ri, GTIMER_HYP, value);
3023}
3024
3025static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3026                              uint64_t value)
3027{
3028    gt_ctl_write(env, ri, GTIMER_HYP, value);
3029}
3030
3031static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3032{
3033    gt_timer_reset(env, ri, GTIMER_SEC);
3034}
3035
3036static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3037                              uint64_t value)
3038{
3039    gt_cval_write(env, ri, GTIMER_SEC, value);
3040}
3041
3042static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3043{
3044    return gt_tval_read(env, ri, GTIMER_SEC);
3045}
3046
3047static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3048                              uint64_t value)
3049{
3050    gt_tval_write(env, ri, GTIMER_SEC, value);
3051}
3052
3053static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3054                              uint64_t value)
3055{
3056    gt_ctl_write(env, ri, GTIMER_SEC, value);
3057}
3058
3059static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3060{
3061    gt_timer_reset(env, ri, GTIMER_HYPVIRT);
3062}
3063
3064static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3065                             uint64_t value)
3066{
3067    gt_cval_write(env, ri, GTIMER_HYPVIRT, value);
3068}
3069
3070static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
3071{
3072    return gt_tval_read(env, ri, GTIMER_HYPVIRT);
3073}
3074
3075static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
3076                             uint64_t value)
3077{
3078    gt_tval_write(env, ri, GTIMER_HYPVIRT, value);
3079}
3080
3081static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
3082                            uint64_t value)
3083{
3084    gt_ctl_write(env, ri, GTIMER_HYPVIRT, value);
3085}
3086
3087void arm_gt_ptimer_cb(void *opaque)
3088{
3089    ARMCPU *cpu = opaque;
3090
3091    gt_recalc_timer(cpu, GTIMER_PHYS);
3092}
3093
3094void arm_gt_vtimer_cb(void *opaque)
3095{
3096    ARMCPU *cpu = opaque;
3097
3098    gt_recalc_timer(cpu, GTIMER_VIRT);
3099}
3100
3101void arm_gt_htimer_cb(void *opaque)
3102{
3103    ARMCPU *cpu = opaque;
3104
3105    gt_recalc_timer(cpu, GTIMER_HYP);
3106}
3107
3108void arm_gt_stimer_cb(void *opaque)
3109{
3110    ARMCPU *cpu = opaque;
3111
3112    gt_recalc_timer(cpu, GTIMER_SEC);
3113}
3114
3115void arm_gt_hvtimer_cb(void *opaque)
3116{
3117    ARMCPU *cpu = opaque;
3118
3119    gt_recalc_timer(cpu, GTIMER_HYPVIRT);
3120}
3121
3122static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
3123{
3124    ARMCPU *cpu = env_archcpu(env);
3125
3126    cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz;
3127}
3128
3129static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3130    /* Note that CNTFRQ is purely reads-as-written for the benefit
3131     * of software; writing it doesn't actually change the timer frequency.
3132     * Our reset value matches the fixed frequency we implement the timer at.
3133     */
3134    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
3135      .type = ARM_CP_ALIAS,
3136      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3137      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
3138    },
3139    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3140      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3141      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
3142      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3143      .resetfn = arm_gt_cntfrq_reset,
3144    },
3145    /* overall control: mostly access permissions */
3146    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
3147      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
3148      .access = PL1_RW,
3149      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
3150      .resetvalue = 0,
3151    },
3152    /* per-timer control */
3153    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3154      .secure = ARM_CP_SECSTATE_NS,
3155      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3156      .accessfn = gt_ptimer_access,
3157      .fieldoffset = offsetoflow32(CPUARMState,
3158                                   cp15.c14_timer[GTIMER_PHYS].ctl),
3159      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3160      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3161    },
3162    { .name = "CNTP_CTL_S",
3163      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
3164      .secure = ARM_CP_SECSTATE_S,
3165      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3166      .accessfn = gt_ptimer_access,
3167      .fieldoffset = offsetoflow32(CPUARMState,
3168                                   cp15.c14_timer[GTIMER_SEC].ctl),
3169      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3170    },
3171    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
3172      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
3173      .type = ARM_CP_IO, .access = PL0_RW,
3174      .accessfn = gt_ptimer_access,
3175      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
3176      .resetvalue = 0,
3177      .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read,
3178      .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write,
3179    },
3180    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
3181      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
3182      .accessfn = gt_vtimer_access,
3183      .fieldoffset = offsetoflow32(CPUARMState,
3184                                   cp15.c14_timer[GTIMER_VIRT].ctl),
3185      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3186      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3187    },
3188    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
3189      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
3190      .type = ARM_CP_IO, .access = PL0_RW,
3191      .accessfn = gt_vtimer_access,
3192      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
3193      .resetvalue = 0,
3194      .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read,
3195      .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write,
3196    },
3197    /* TimerValue views: a 32 bit downcounting view of the underlying state */
3198    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3199      .secure = ARM_CP_SECSTATE_NS,
3200      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3201      .accessfn = gt_ptimer_access,
3202      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3203    },
3204    { .name = "CNTP_TVAL_S",
3205      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
3206      .secure = ARM_CP_SECSTATE_S,
3207      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3208      .accessfn = gt_ptimer_access,
3209      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
3210    },
3211    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3212      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
3213      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3214      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
3215      .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write,
3216    },
3217    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
3218      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3219      .accessfn = gt_vtimer_access,
3220      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3221    },
3222    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
3223      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
3224      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
3225      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
3226      .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write,
3227    },
3228    /* The counter itself */
3229    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
3230      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3231      .accessfn = gt_pct_access,
3232      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
3233    },
3234    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
3235      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
3236      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3237      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
3238    },
3239    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
3240      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
3241      .accessfn = gt_vct_access,
3242      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
3243    },
3244    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3245      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3246      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3247      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
3248    },
3249    /* Comparison value, indicating when the timer goes off */
3250    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
3251      .secure = ARM_CP_SECSTATE_NS,
3252      .access = PL0_RW,
3253      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3254      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3255      .accessfn = gt_ptimer_access,
3256      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3257      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3258    },
3259    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
3260      .secure = ARM_CP_SECSTATE_S,
3261      .access = PL0_RW,
3262      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3263      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3264      .accessfn = gt_ptimer_access,
3265      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3266    },
3267    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3268      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
3269      .access = PL0_RW,
3270      .type = ARM_CP_IO,
3271      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
3272      .resetvalue = 0, .accessfn = gt_ptimer_access,
3273      .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read,
3274      .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write,
3275    },
3276    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
3277      .access = PL0_RW,
3278      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
3279      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3280      .accessfn = gt_vtimer_access,
3281      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3282      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3283    },
3284    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
3285      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
3286      .access = PL0_RW,
3287      .type = ARM_CP_IO,
3288      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
3289      .resetvalue = 0, .accessfn = gt_vtimer_access,
3290      .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read,
3291      .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write,
3292    },
3293    /* Secure timer -- this is actually restricted to only EL3
3294     * and configurably Secure-EL1 via the accessfn.
3295     */
3296    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
3297      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
3298      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
3299      .accessfn = gt_stimer_access,
3300      .readfn = gt_sec_tval_read,
3301      .writefn = gt_sec_tval_write,
3302      .resetfn = gt_sec_timer_reset,
3303    },
3304    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
3305      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
3306      .type = ARM_CP_IO, .access = PL1_RW,
3307      .accessfn = gt_stimer_access,
3308      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
3309      .resetvalue = 0,
3310      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
3311    },
3312    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
3313      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
3314      .type = ARM_CP_IO, .access = PL1_RW,
3315      .accessfn = gt_stimer_access,
3316      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
3317      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
3318    },
3319    REGINFO_SENTINEL
3320};
3321
3322static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri,
3323                                 bool isread)
3324{
3325    if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
3326        return CP_ACCESS_TRAP;
3327    }
3328    return CP_ACCESS_OK;
3329}
3330
3331#else
3332
3333/* In user-mode most of the generic timer registers are inaccessible
3334 * however modern kernels (4.12+) allow access to cntvct_el0
3335 */
3336
3337static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
3338{
3339    ARMCPU *cpu = env_archcpu(env);
3340
3341    /* Currently we have no support for QEMUTimer in linux-user so we
3342     * can't call gt_get_countervalue(env), instead we directly
3343     * call the lower level functions.
3344     */
3345    return cpu_get_clock() / gt_cntfrq_period_ns(cpu);
3346}
3347
3348static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
3349    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
3350      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
3351      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
3352      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
3353      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
3354    },
3355    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
3356      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
3357      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
3358      .readfn = gt_virt_cnt_read,
3359    },
3360    REGINFO_SENTINEL
3361};
3362
3363#endif
3364
3365static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3366{
3367    if (arm_feature(env, ARM_FEATURE_LPAE)) {
3368        raw_write(env, ri, value);
3369    } else if (arm_feature(env, ARM_FEATURE_V7)) {
3370        raw_write(env, ri, value & 0xfffff6ff);
3371    } else {
3372        raw_write(env, ri, value & 0xfffff1ff);
3373    }
3374}
3375
3376#ifndef CONFIG_USER_ONLY
3377/* get_phys_addr() isn't present for user-mode-only targets */
3378
3379static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
3380                                 bool isread)
3381{
3382    if (ri->opc2 & 4) {
3383        /* The ATS12NSO* operations must trap to EL3 if executed in
3384         * Secure EL1 (which can only happen if EL3 is AArch64).
3385         * They are simply UNDEF if executed from NS EL1.
3386         * They function normally from EL2 or EL3.
3387         */
3388        if (arm_current_el(env) == 1) {
3389            if (arm_is_secure_below_el3(env)) {
3390                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
3391            }
3392            return CP_ACCESS_TRAP_UNCATEGORIZED;
3393        }
3394    }
3395    return CP_ACCESS_OK;
3396}
3397
3398#ifdef CONFIG_TCG
3399static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
3400                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
3401{
3402    hwaddr phys_addr;
3403    target_ulong page_size;
3404    int prot;
3405    bool ret;
3406    uint64_t par64;
3407    bool format64 = false;
3408    MemTxAttrs attrs = {};
3409    ARMMMUFaultInfo fi = {};
3410    ARMCacheAttrs cacheattrs = {};
3411
3412    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
3413                        &prot, &page_size, &fi, &cacheattrs);
3414
3415    if (ret) {
3416        /*
3417         * Some kinds of translation fault must cause exceptions rather
3418         * than being reported in the PAR.
3419         */
3420        int current_el = arm_current_el(env);
3421        int target_el;
3422        uint32_t syn, fsr, fsc;
3423        bool take_exc = false;
3424
3425        if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
3426            && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
3427            /*
3428             * Synchronous stage 2 fault on an access made as part of the
3429             * translation table walk for AT S1E0* or AT S1E1* insn
3430             * executed from NS EL1. If this is a synchronous external abort
3431             * and SCR_EL3.EA == 1, then we take a synchronous external abort
3432             * to EL3. Otherwise the fault is taken as an exception to EL2,
3433             * and HPFAR_EL2 holds the faulting IPA.
3434             */
3435            if (fi.type == ARMFault_SyncExternalOnWalk &&
3436                (env->cp15.scr_el3 & SCR_EA)) {
3437                target_el = 3;
3438            } else {
3439                env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
3440                target_el = 2;
3441            }
3442            take_exc = true;
3443        } else if (fi.type == ARMFault_SyncExternalOnWalk) {
3444            /*
3445             * Synchronous external aborts during a translation table walk
3446             * are taken as Data Abort exceptions.
3447             */
3448            if (fi.stage2) {
3449                if (current_el == 3) {
3450                    target_el = 3;
3451                } else {
3452                    target_el = 2;
3453                }
3454            } else {
3455                target_el = exception_target_el(env);
3456            }
3457            take_exc = true;
3458        }
3459
3460        if (take_exc) {
3461            /* Construct FSR and FSC using same logic as arm_deliver_fault() */
3462            if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
3463                arm_s1_regime_using_lpae_format(env, mmu_idx)) {
3464                fsr = arm_fi_to_lfsc(&fi);
3465                fsc = extract32(fsr, 0, 6);
3466            } else {
3467                fsr = arm_fi_to_sfsc(&fi);
3468                fsc = 0x3f;
3469            }
3470            /*
3471             * Report exception with ESR indicating a fault due to a
3472             * translation table walk for a cache maintenance instruction.
3473             */
3474            syn = syn_data_abort_no_iss(current_el == target_el, 0,
3475                                        fi.ea, 1, fi.s1ptw, 1, fsc);
3476            env->exception.vaddress = value;
3477            env->exception.fsr = fsr;
3478            raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
3479        }
3480    }
3481
3482    if (is_a64(env)) {
3483        format64 = true;
3484    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
3485        /*
3486         * ATS1Cxx:
3487         * * TTBCR.EAE determines whether the result is returned using the
3488         *   32-bit or the 64-bit PAR format
3489         * * Instructions executed in Hyp mode always use the 64bit format
3490         *
3491         * ATS1S2NSOxx uses the 64bit format if any of the following is true:
3492         * * The Non-secure TTBCR.EAE bit is set to 1
3493         * * The implementation includes EL2, and the value of HCR.VM is 1
3494         *
3495         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
3496         *
3497         * ATS1Hx always uses the 64bit format.
3498         */
3499        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
3500
3501        if (arm_feature(env, ARM_FEATURE_EL2)) {
3502            if (mmu_idx == ARMMMUIdx_E10_0 ||
3503                mmu_idx == ARMMMUIdx_E10_1 ||
3504                mmu_idx == ARMMMUIdx_E10_1_PAN) {
3505                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
3506            } else {
3507                format64 |= arm_current_el(env) == 2;
3508            }
3509        }
3510    }
3511
3512    if (format64) {
3513        /* Create a 64-bit PAR */
3514        par64 = (1 << 11); /* LPAE bit always set */
3515        if (!ret) {
3516            par64 |= phys_addr & ~0xfffULL;
3517            if (!attrs.secure) {
3518                par64 |= (1 << 9); /* NS */
3519            }
3520            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
3521            par64 |= cacheattrs.shareability << 7; /* SH */
3522        } else {
3523            uint32_t fsr = arm_fi_to_lfsc(&fi);
3524
3525            par64 |= 1; /* F */
3526            par64 |= (fsr & 0x3f) << 1; /* FS */
3527            if (fi.stage2) {
3528                par64 |= (1 << 9); /* S */
3529            }
3530            if (fi.s1ptw) {
3531                par64 |= (1 << 8); /* PTW */
3532            }
3533        }
3534    } else {
3535        /* fsr is a DFSR/IFSR value for the short descriptor
3536         * translation table format (with WnR always clear).
3537         * Convert it to a 32-bit PAR.
3538         */
3539        if (!ret) {
3540            /* We do not set any attribute bits in the PAR */
3541            if (page_size == (1 << 24)
3542                && arm_feature(env, ARM_FEATURE_V7)) {
3543                par64 = (phys_addr & 0xff000000) | (1 << 1);
3544            } else {
3545                par64 = phys_addr & 0xfffff000;
3546            }
3547            if (!attrs.secure) {
3548                par64 |= (1 << 9); /* NS */
3549            }
3550        } else {
3551            uint32_t fsr = arm_fi_to_sfsc(&fi);
3552
3553            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3554                    ((fsr & 0xf) << 1) | 1;
3555        }
3556    }
3557    return par64;
3558}
3559#endif /* CONFIG_TCG */
3560
3561static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3562{
3563#ifdef CONFIG_TCG
3564    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3565    uint64_t par64;
3566    ARMMMUIdx mmu_idx;
3567    int el = arm_current_el(env);
3568    bool secure = arm_is_secure_below_el3(env);
3569
3570    switch (ri->opc2 & 6) {
3571    case 0:
3572        /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
3573        switch (el) {
3574        case 3:
3575            mmu_idx = ARMMMUIdx_SE3;
3576            break;
3577        case 2:
3578            g_assert(!secure);  /* TODO: ARMv8.4-SecEL2 */
3579            /* fall through */
3580        case 1:
3581            if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
3582                mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
3583                           : ARMMMUIdx_Stage1_E1_PAN);
3584            } else {
3585                mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
3586            }
3587            break;
3588        default:
3589            g_assert_not_reached();
3590        }
3591        break;
3592    case 2:
3593        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3594        switch (el) {
3595        case 3:
3596            mmu_idx = ARMMMUIdx_SE10_0;
3597            break;
3598        case 2:
3599            mmu_idx = ARMMMUIdx_Stage1_E0;
3600            break;
3601        case 1:
3602            mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
3603            break;
3604        default:
3605            g_assert_not_reached();
3606        }
3607        break;
3608    case 4:
3609        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3610        mmu_idx = ARMMMUIdx_E10_1;
3611        break;
3612    case 6:
3613        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3614        mmu_idx = ARMMMUIdx_E10_0;
3615        break;
3616    default:
3617        g_assert_not_reached();
3618    }
3619
3620    par64 = do_ats_write(env, value, access_type, mmu_idx);
3621
3622    A32_BANKED_CURRENT_REG_SET(env, par, par64);
3623#else
3624    /* Handled by hardware accelerator. */
3625    g_assert_not_reached();
3626#endif /* CONFIG_TCG */
3627}
3628
3629static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3630                        uint64_t value)
3631{
3632#ifdef CONFIG_TCG
3633    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3634    uint64_t par64;
3635
3636    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2);
3637
3638    A32_BANKED_CURRENT_REG_SET(env, par, par64);
3639#else
3640    /* Handled by hardware accelerator. */
3641    g_assert_not_reached();
3642#endif /* CONFIG_TCG */
3643}
3644
3645static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3646                                     bool isread)
3647{
3648    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
3649        return CP_ACCESS_TRAP;
3650    }
3651    return CP_ACCESS_OK;
3652}
3653
3654static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3655                        uint64_t value)
3656{
3657#ifdef CONFIG_TCG
3658    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3659    ARMMMUIdx mmu_idx;
3660    int secure = arm_is_secure_below_el3(env);
3661
3662    switch (ri->opc2 & 6) {
3663    case 0:
3664        switch (ri->opc1) {
3665        case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
3666            if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
3667                mmu_idx = (secure ? ARMMMUIdx_SE10_1_PAN
3668                           : ARMMMUIdx_Stage1_E1_PAN);
3669            } else {
3670                mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_Stage1_E1;
3671            }
3672            break;
3673        case 4: /* AT S1E2R, AT S1E2W */
3674            mmu_idx = ARMMMUIdx_E2;
3675            break;
3676        case 6: /* AT S1E3R, AT S1E3W */
3677            mmu_idx = ARMMMUIdx_SE3;
3678            break;
3679        default:
3680            g_assert_not_reached();
3681        }
3682        break;
3683    case 2: /* AT S1E0R, AT S1E0W */
3684        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_Stage1_E0;
3685        break;
3686    case 4: /* AT S12E1R, AT S12E1W */
3687        mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
3688        break;
3689    case 6: /* AT S12E0R, AT S12E0W */
3690        mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
3691        break;
3692    default:
3693        g_assert_not_reached();
3694    }
3695
3696    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
3697#else
3698    /* Handled by hardware accelerator. */
3699    g_assert_not_reached();
3700#endif /* CONFIG_TCG */
3701}
3702#endif
3703
3704static const ARMCPRegInfo vapa_cp_reginfo[] = {
3705    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
3706      .access = PL1_RW, .resetvalue = 0,
3707      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
3708                             offsetoflow32(CPUARMState, cp15.par_ns) },
3709      .writefn = par_write },
3710#ifndef CONFIG_USER_ONLY
3711    /* This underdecoding is safe because the reginfo is NO_RAW. */
3712    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
3713      .access = PL1_W, .accessfn = ats_access,
3714      .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
3715#endif
3716    REGINFO_SENTINEL
3717};
3718
3719/* Return basic MPU access permission bits.  */
3720static uint32_t simple_mpu_ap_bits(uint32_t val)
3721{
3722    uint32_t ret;
3723    uint32_t mask;
3724    int i;
3725    ret = 0;
3726    mask = 3;
3727    for (i = 0; i < 16; i += 2) {
3728        ret |= (val >> i) & mask;
3729        mask <<= 2;
3730    }
3731    return ret;
3732}
3733
3734/* Pad basic MPU access permission bits to extended format.  */
3735static uint32_t extended_mpu_ap_bits(uint32_t val)
3736{
3737    uint32_t ret;
3738    uint32_t mask;
3739    int i;
3740    ret = 0;
3741    mask = 3;
3742    for (i = 0; i < 16; i += 2) {
3743        ret |= (val & mask) << i;
3744        mask <<= 2;
3745    }
3746    return ret;
3747}
3748
3749static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3750                                 uint64_t value)
3751{
3752    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3753}
3754
3755static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3756{
3757    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3758}
3759
3760static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3761                                 uint64_t value)
3762{
3763    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3764}
3765
3766static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3767{
3768    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3769}
3770
3771static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3772{
3773    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3774
3775    if (!u32p) {
3776        return 0;
3777    }
3778
3779    u32p += env->pmsav7.rnr[M_REG_NS];
3780    return *u32p;
3781}
3782
3783static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3784                         uint64_t value)
3785{
3786    ARMCPU *cpu = env_archcpu(env);
3787    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3788
3789    if (!u32p) {
3790        return;
3791    }
3792
3793    u32p += env->pmsav7.rnr[M_REG_NS];
3794    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3795    *u32p = value;
3796}
3797
3798static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3799                              uint64_t value)
3800{
3801    ARMCPU *cpu = env_archcpu(env);
3802    uint32_t nrgs = cpu->pmsav7_dregion;
3803
3804    if (value >= nrgs) {
3805        qemu_log_mask(LOG_GUEST_ERROR,
3806                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3807                      " > %" PRIu32 "\n", (uint32_t)value, nrgs);
3808        return;
3809    }
3810
3811    raw_write(env, ri, value);
3812}
3813
3814static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
3815    /* Reset for all these registers is handled in arm_cpu_reset(),
3816     * because the PMSAv7 is also used by M-profile CPUs, which do
3817     * not register cpregs but still need the state to be reset.
3818     */
3819    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
3820      .access = PL1_RW, .type = ARM_CP_NO_RAW,
3821      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
3822      .readfn = pmsav7_read, .writefn = pmsav7_write,
3823      .resetfn = arm_cp_reset_ignore },
3824    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
3825      .access = PL1_RW, .type = ARM_CP_NO_RAW,
3826      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
3827      .readfn = pmsav7_read, .writefn = pmsav7_write,
3828      .resetfn = arm_cp_reset_ignore },
3829    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
3830      .access = PL1_RW, .type = ARM_CP_NO_RAW,
3831      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
3832      .readfn = pmsav7_read, .writefn = pmsav7_write,
3833      .resetfn = arm_cp_reset_ignore },
3834    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
3835      .access = PL1_RW,
3836      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
3837      .writefn = pmsav7_rgnr_write,
3838      .resetfn = arm_cp_reset_ignore },
3839    REGINFO_SENTINEL
3840};
3841
3842static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
3843    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3844      .access = PL1_RW, .type = ARM_CP_ALIAS,
3845      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3846      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
3847    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3848      .access = PL1_RW, .type = ARM_CP_ALIAS,
3849      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3850      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
3851    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
3852      .access = PL1_RW,
3853      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3854      .resetvalue = 0, },
3855    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
3856      .access = PL1_RW,
3857      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3858      .resetvalue = 0, },
3859    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
3860      .access = PL1_RW,
3861      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
3862    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
3863      .access = PL1_RW,
3864      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
3865    /* Protection region base and size registers */
3866    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
3867      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3868      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
3869    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
3870      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3871      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
3872    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
3873      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3874      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
3875    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
3876      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3877      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
3878    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
3879      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3880      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
3881    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
3882      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3883      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
3884    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
3885      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3886      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
3887    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
3888      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3889      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
3890    REGINFO_SENTINEL
3891};
3892
3893static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
3894                                 uint64_t value)
3895{
3896    TCR *tcr = raw_ptr(env, ri);
3897    int maskshift = extract32(value, 0, 3);
3898
3899    if (!arm_feature(env, ARM_FEATURE_V8)) {
3900        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
3901            /* Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
3902             * using Long-desciptor translation table format */
3903            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
3904        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
3905            /* In an implementation that includes the Security Extensions
3906             * TTBCR has additional fields PD0 [4] and PD1 [5] for
3907             * Short-descriptor translation table format.
3908             */
3909            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
3910        } else {
3911            value &= TTBCR_N;
3912        }
3913    }
3914
3915    /* Update the masks corresponding to the TCR bank being written
3916     * Note that we always calculate mask and base_mask, but
3917     * they are only used for short-descriptor tables (ie if EAE is 0);
3918     * for long-descriptor tables the TCR fields are used differently
3919     * and the mask and base_mask values are meaningless.
3920     */
3921    tcr->raw_tcr = value;
3922    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
3923    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
3924}
3925
3926static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3927                             uint64_t value)
3928{
3929    ARMCPU *cpu = env_archcpu(env);
3930    TCR *tcr = raw_ptr(env, ri);
3931
3932    if (arm_feature(env, ARM_FEATURE_LPAE)) {
3933        /* With LPAE the TTBCR could result in a change of ASID
3934         * via the TTBCR.A1 bit, so do a TLB flush.
3935         */
3936        tlb_flush(CPU(cpu));
3937    }
3938    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
3939    value = deposit64(tcr->raw_tcr, 0, 32, value);
3940    vmsa_ttbcr_raw_write(env, ri, value);
3941}
3942
3943static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3944{
3945    TCR *tcr = raw_ptr(env, ri);
3946
3947    /* Reset both the TCR as well as the masks corresponding to the bank of
3948     * the TCR being reset.
3949     */
3950    tcr->raw_tcr = 0;
3951    tcr->mask = 0;
3952    tcr->base_mask = 0xffffc000u;
3953}
3954
3955static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri,
3956                               uint64_t value)
3957{
3958    ARMCPU *cpu = env_archcpu(env);
3959    TCR *tcr = raw_ptr(env, ri);
3960
3961    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
3962    tlb_flush(CPU(cpu));
3963    tcr->raw_tcr = value;
3964}
3965
3966static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3967                            uint64_t value)
3968{
3969    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
3970    if (cpreg_field_is_64bit(ri) &&
3971        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
3972        ARMCPU *cpu = env_archcpu(env);
3973        tlb_flush(CPU(cpu));
3974    }
3975    raw_write(env, ri, value);
3976}
3977
3978static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3979                                    uint64_t value)
3980{
3981    /*
3982     * If we are running with E2&0 regime, then an ASID is active.
3983     * Flush if that might be changing.  Note we're not checking
3984     * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that
3985     * holds the active ASID, only checking the field that might.
3986     */
3987    if (extract64(raw_read(env, ri) ^ value, 48, 16) &&
3988        (arm_hcr_el2_eff(env) & HCR_E2H)) {
3989        tlb_flush_by_mmuidx(env_cpu(env),
3990                            ARMMMUIdxBit_E20_2 |
3991                            ARMMMUIdxBit_E20_2_PAN |
3992                            ARMMMUIdxBit_E20_0);
3993    }
3994    raw_write(env, ri, value);
3995}
3996
3997static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3998                        uint64_t value)
3999{
4000    ARMCPU *cpu = env_archcpu(env);
4001    CPUState *cs = CPU(cpu);
4002
4003    /*
4004     * A change in VMID to the stage2 page table (Stage2) invalidates
4005     * the combined stage 1&2 tlbs (EL10_1 and EL10_0).
4006     */
4007    if (raw_read(env, ri) != value) {
4008        tlb_flush_by_mmuidx(cs,
4009                            ARMMMUIdxBit_E10_1 |
4010                            ARMMMUIdxBit_E10_1_PAN |
4011                            ARMMMUIdxBit_E10_0);
4012        raw_write(env, ri, value);
4013    }
4014}
4015
4016static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
4017    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
4018      .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS,
4019      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
4020                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
4021    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
4022      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4023      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
4024                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
4025    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
4026      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
4027      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
4028                             offsetof(CPUARMState, cp15.dfar_ns) } },
4029    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
4030      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
4031      .access = PL1_RW, .accessfn = access_tvm_trvm,
4032      .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
4033      .resetvalue = 0, },
4034    REGINFO_SENTINEL
4035};
4036
4037static const ARMCPRegInfo vmsa_cp_reginfo[] = {
4038    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
4039      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
4040      .access = PL1_RW, .accessfn = access_tvm_trvm,
4041      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
4042    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
4043      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
4044      .access = PL1_RW, .accessfn = access_tvm_trvm,
4045      .writefn = vmsa_ttbr_write, .resetvalue = 0,
4046      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4047                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
4048    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
4049      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
4050      .access = PL1_RW, .accessfn = access_tvm_trvm,
4051      .writefn = vmsa_ttbr_write, .resetvalue = 0,
4052      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4053                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
4054    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
4055      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4056      .access = PL1_RW, .accessfn = access_tvm_trvm,
4057      .writefn = vmsa_tcr_el12_write,
4058      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
4059      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
4060    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
4061      .access = PL1_RW, .accessfn = access_tvm_trvm,
4062      .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
4063      .raw_writefn = vmsa_ttbcr_raw_write,
4064      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
4065                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
4066    REGINFO_SENTINEL
4067};
4068
4069/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
4070 * qemu tlbs nor adjusting cached masks.
4071 */
4072static const ARMCPRegInfo ttbcr2_reginfo = {
4073    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
4074    .access = PL1_RW, .accessfn = access_tvm_trvm,
4075    .type = ARM_CP_ALIAS,
4076    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
4077                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
4078};
4079
4080static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
4081                                uint64_t value)
4082{
4083    env->cp15.c15_ticonfig = value & 0xe7;
4084    /* The OS_TYPE bit in this register changes the reported CPUID! */
4085    env->cp15.c0_cpuid = (value & (1 << 5)) ?
4086        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
4087}
4088
4089static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
4090                                uint64_t value)
4091{
4092    env->cp15.c15_threadid = value & 0xffff;
4093}
4094
4095static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
4096                           uint64_t value)
4097{
4098    /* Wait-for-interrupt (deprecated) */
4099    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
4100}
4101
4102static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
4103                                  uint64_t value)
4104{
4105    /* On OMAP there are registers indicating the max/min index of dcache lines
4106     * containing a dirty line; cache flush operations have to reset these.
4107     */
4108    env->cp15.c15_i_max = 0x000;
4109    env->cp15.c15_i_min = 0xff0;
4110}
4111
4112static const ARMCPRegInfo omap_cp_reginfo[] = {
4113    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
4114      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
4115      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
4116      .resetvalue = 0, },
4117    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
4118      .access = PL1_RW, .type = ARM_CP_NOP },
4119    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
4120      .access = PL1_RW,
4121      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
4122      .writefn = omap_ticonfig_write },
4123    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
4124      .access = PL1_RW,
4125      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
4126    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
4127      .access = PL1_RW, .resetvalue = 0xff0,
4128      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
4129    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
4130      .access = PL1_RW,
4131      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
4132      .writefn = omap_threadid_write },
4133    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
4134      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4135      .type = ARM_CP_NO_RAW,
4136      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
4137    /* TODO: Peripheral port remap register:
4138     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
4139     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
4140     * when MMU is off.
4141     */
4142    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
4143      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
4144      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
4145      .writefn = omap_cachemaint_write },
4146    { .name = "C9", .cp = 15, .crn = 9,
4147      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
4148      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
4149    REGINFO_SENTINEL
4150};
4151
4152static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
4153                              uint64_t value)
4154{
4155    env->cp15.c15_cpar = value & 0x3fff;
4156}
4157
4158static const ARMCPRegInfo xscale_cp_reginfo[] = {
4159    { .name = "XSCALE_CPAR",
4160      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
4161      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
4162      .writefn = xscale_cpar_write, },
4163    { .name = "XSCALE_AUXCR",
4164      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
4165      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
4166      .resetvalue = 0, },
4167    /* XScale specific cache-lockdown: since we have no cache we NOP these
4168     * and hope the guest does not really rely on cache behaviour.
4169     */
4170    { .name = "XSCALE_LOCK_ICACHE_LINE",
4171      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
4172      .access = PL1_W, .type = ARM_CP_NOP },
4173    { .name = "XSCALE_UNLOCK_ICACHE",
4174      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
4175      .access = PL1_W, .type = ARM_CP_NOP },
4176    { .name = "XSCALE_DCACHE_LOCK",
4177      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
4178      .access = PL1_RW, .type = ARM_CP_NOP },
4179    { .name = "XSCALE_UNLOCK_DCACHE",
4180      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
4181      .access = PL1_W, .type = ARM_CP_NOP },
4182    REGINFO_SENTINEL
4183};
4184
4185static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
4186    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
4187     * implementation of this implementation-defined space.
4188     * Ideally this should eventually disappear in favour of actually
4189     * implementing the correct behaviour for all cores.
4190     */
4191    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
4192      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4193      .access = PL1_RW,
4194      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
4195      .resetvalue = 0 },
4196    REGINFO_SENTINEL
4197};
4198
4199static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
4200    /* Cache status: RAZ because we have no cache so it's always clean */
4201    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
4202      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4203      .resetvalue = 0 },
4204    REGINFO_SENTINEL
4205};
4206
4207static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
4208    /* We never have a a block transfer operation in progress */
4209    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
4210      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4211      .resetvalue = 0 },
4212    /* The cache ops themselves: these all NOP for QEMU */
4213    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
4214      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4215    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
4216      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4217    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
4218      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4219    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
4220      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4221    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
4222      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4223    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
4224      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
4225    REGINFO_SENTINEL
4226};
4227
4228static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
4229    /* The cache test-and-clean instructions always return (1 << 30)
4230     * to indicate that there are no dirty cache lines.
4231     */
4232    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
4233      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4234      .resetvalue = (1 << 30) },
4235    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
4236      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
4237      .resetvalue = (1 << 30) },
4238    REGINFO_SENTINEL
4239};
4240
4241static const ARMCPRegInfo strongarm_cp_reginfo[] = {
4242    /* Ignore ReadBuffer accesses */
4243    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
4244      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
4245      .access = PL1_RW, .resetvalue = 0,
4246      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
4247    REGINFO_SENTINEL
4248};
4249
4250static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4251{
4252    ARMCPU *cpu = env_archcpu(env);
4253    unsigned int cur_el = arm_current_el(env);
4254    bool secure = arm_is_secure(env);
4255
4256    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
4257        return env->cp15.vpidr_el2;
4258    }
4259    return raw_read(env, ri);
4260}
4261
4262static uint64_t mpidr_read_val(CPUARMState *env)
4263{
4264    ARMCPU *cpu = env_archcpu(env);
4265    uint64_t mpidr = cpu->mp_affinity;
4266
4267    if (arm_feature(env, ARM_FEATURE_V7MP)) {
4268        mpidr |= (1U << 31);
4269        /* Cores which are uniprocessor (non-coherent)
4270         * but still implement the MP extensions set
4271         * bit 30. (For instance, Cortex-R5).
4272         */
4273        if (cpu->mp_is_up) {
4274            mpidr |= (1u << 30);
4275        }
4276    }
4277    return mpidr;
4278}
4279
4280static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4281{
4282    unsigned int cur_el = arm_current_el(env);
4283    bool secure = arm_is_secure(env);
4284
4285    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
4286        return env->cp15.vmpidr_el2;
4287    }
4288    return mpidr_read_val(env);
4289}
4290
4291static const ARMCPRegInfo lpae_cp_reginfo[] = {
4292    /* NOP AMAIR0/1 */
4293    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
4294      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
4295      .access = PL1_RW, .accessfn = access_tvm_trvm,
4296      .type = ARM_CP_CONST, .resetvalue = 0 },
4297    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
4298    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
4299      .access = PL1_RW, .accessfn = access_tvm_trvm,
4300      .type = ARM_CP_CONST, .resetvalue = 0 },
4301    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
4302      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
4303      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
4304                             offsetof(CPUARMState, cp15.par_ns)} },
4305    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
4306      .access = PL1_RW, .accessfn = access_tvm_trvm,
4307      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4308      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
4309                             offsetof(CPUARMState, cp15.ttbr0_ns) },
4310      .writefn = vmsa_ttbr_write, },
4311    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
4312      .access = PL1_RW, .accessfn = access_tvm_trvm,
4313      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4314      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
4315                             offsetof(CPUARMState, cp15.ttbr1_ns) },
4316      .writefn = vmsa_ttbr_write, },
4317    REGINFO_SENTINEL
4318};
4319
4320static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4321{
4322    return vfp_get_fpcr(env);
4323}
4324
4325static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4326                            uint64_t value)
4327{
4328    vfp_set_fpcr(env, value);
4329}
4330
4331static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
4332{
4333    return vfp_get_fpsr(env);
4334}
4335
4336static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4337                            uint64_t value)
4338{
4339    vfp_set_fpsr(env, value);
4340}
4341
4342static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
4343                                       bool isread)
4344{
4345    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
4346        return CP_ACCESS_TRAP;
4347    }
4348    return CP_ACCESS_OK;
4349}
4350
4351static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
4352                            uint64_t value)
4353{
4354    env->daif = value & PSTATE_DAIF;
4355}
4356
4357static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri)
4358{
4359    return env->pstate & PSTATE_PAN;
4360}
4361
4362static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri,
4363                           uint64_t value)
4364{
4365    env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN);
4366}
4367
4368static const ARMCPRegInfo pan_reginfo = {
4369    .name = "PAN", .state = ARM_CP_STATE_AA64,
4370    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3,
4371    .type = ARM_CP_NO_RAW, .access = PL1_RW,
4372    .readfn = aa64_pan_read, .writefn = aa64_pan_write
4373};
4374
4375static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri)
4376{
4377    return env->pstate & PSTATE_UAO;
4378}
4379
4380static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri,
4381                           uint64_t value)
4382{
4383    env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO);
4384}
4385
4386static const ARMCPRegInfo uao_reginfo = {
4387    .name = "UAO", .state = ARM_CP_STATE_AA64,
4388    .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4,
4389    .type = ARM_CP_NO_RAW, .access = PL1_RW,
4390    .readfn = aa64_uao_read, .writefn = aa64_uao_write
4391};
4392
4393static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env,
4394                                              const ARMCPRegInfo *ri,
4395                                              bool isread)
4396{
4397    /* Cache invalidate/clean to Point of Coherency or Persistence...  */
4398    switch (arm_current_el(env)) {
4399    case 0:
4400        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4401        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4402            return CP_ACCESS_TRAP;
4403        }
4404        /* fall through */
4405    case 1:
4406        /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set.  */
4407        if (arm_hcr_el2_eff(env) & HCR_TPCP) {
4408            return CP_ACCESS_TRAP_EL2;
4409        }
4410        break;
4411    }
4412    return CP_ACCESS_OK;
4413}
4414
4415static CPAccessResult aa64_cacheop_pou_access(CPUARMState *env,
4416                                              const ARMCPRegInfo *ri,
4417                                              bool isread)
4418{
4419    /* Cache invalidate/clean to Point of Unification... */
4420    switch (arm_current_el(env)) {
4421    case 0:
4422        /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set.  */
4423        if (!(arm_sctlr(env, 0) & SCTLR_UCI)) {
4424            return CP_ACCESS_TRAP;
4425        }
4426        /* fall through */
4427    case 1:
4428        /* ... EL1 must trap to EL2 if HCR_EL2.TPU is set.  */
4429        if (arm_hcr_el2_eff(env) & HCR_TPU) {
4430            return CP_ACCESS_TRAP_EL2;
4431        }
4432        break;
4433    }
4434    return CP_ACCESS_OK;
4435}
4436
4437/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
4438 * Page D4-1736 (DDI0487A.b)
4439 */
4440
4441static int vae1_tlbmask(CPUARMState *env)
4442{
4443    /* Since we exclude secure first, we may read HCR_EL2 directly. */
4444    if (arm_is_secure_below_el3(env)) {
4445        return ARMMMUIdxBit_SE10_1 |
4446               ARMMMUIdxBit_SE10_1_PAN |
4447               ARMMMUIdxBit_SE10_0;
4448    } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
4449               == (HCR_E2H | HCR_TGE)) {
4450        return ARMMMUIdxBit_E20_2 |
4451               ARMMMUIdxBit_E20_2_PAN |
4452               ARMMMUIdxBit_E20_0;
4453    } else {
4454        return ARMMMUIdxBit_E10_1 |
4455               ARMMMUIdxBit_E10_1_PAN |
4456               ARMMMUIdxBit_E10_0;
4457    }
4458}
4459
4460/* Return 56 if TBI is enabled, 64 otherwise. */
4461static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx,
4462                              uint64_t addr)
4463{
4464    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
4465    int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
4466    int select = extract64(addr, 55, 1);
4467
4468    return (tbi >> select) & 1 ? 56 : 64;
4469}
4470
4471static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
4472{
4473    ARMMMUIdx mmu_idx;
4474
4475    /* Only the regime of the mmu_idx below is significant. */
4476    if (arm_is_secure_below_el3(env)) {
4477        mmu_idx = ARMMMUIdx_SE10_0;
4478    } else if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE))
4479               == (HCR_E2H | HCR_TGE)) {
4480        mmu_idx = ARMMMUIdx_E20_0;
4481    } else {
4482        mmu_idx = ARMMMUIdx_E10_0;
4483    }
4484    return tlbbits_for_regime(env, mmu_idx, addr);
4485}
4486
4487static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4488                                      uint64_t value)
4489{
4490    CPUState *cs = env_cpu(env);
4491    int mask = vae1_tlbmask(env);
4492
4493    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4494}
4495
4496static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4497                                    uint64_t value)
4498{
4499    CPUState *cs = env_cpu(env);
4500    int mask = vae1_tlbmask(env);
4501
4502    if (tlb_force_broadcast(env)) {
4503        tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4504    } else {
4505        tlb_flush_by_mmuidx(cs, mask);
4506    }
4507}
4508
4509static int alle1_tlbmask(CPUARMState *env)
4510{
4511    /*
4512     * Note that the 'ALL' scope must invalidate both stage 1 and
4513     * stage 2 translations, whereas most other scopes only invalidate
4514     * stage 1 translations.
4515     */
4516    if (arm_is_secure_below_el3(env)) {
4517        return ARMMMUIdxBit_SE10_1 |
4518               ARMMMUIdxBit_SE10_1_PAN |
4519               ARMMMUIdxBit_SE10_0;
4520    } else {
4521        return ARMMMUIdxBit_E10_1 |
4522               ARMMMUIdxBit_E10_1_PAN |
4523               ARMMMUIdxBit_E10_0;
4524    }
4525}
4526
4527static int e2_tlbmask(CPUARMState *env)
4528{
4529    /* TODO: ARMv8.4-SecEL2 */
4530    return ARMMMUIdxBit_E20_0 |
4531           ARMMMUIdxBit_E20_2 |
4532           ARMMMUIdxBit_E20_2_PAN |
4533           ARMMMUIdxBit_E2;
4534}
4535
4536static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4537                                  uint64_t value)
4538{
4539    CPUState *cs = env_cpu(env);
4540    int mask = alle1_tlbmask(env);
4541
4542    tlb_flush_by_mmuidx(cs, mask);
4543}
4544
4545static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4546                                  uint64_t value)
4547{
4548    CPUState *cs = env_cpu(env);
4549    int mask = e2_tlbmask(env);
4550
4551    tlb_flush_by_mmuidx(cs, mask);
4552}
4553
4554static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4555                                  uint64_t value)
4556{
4557    ARMCPU *cpu = env_archcpu(env);
4558    CPUState *cs = CPU(cpu);
4559
4560    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
4561}
4562
4563static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4564                                    uint64_t value)
4565{
4566    CPUState *cs = env_cpu(env);
4567    int mask = alle1_tlbmask(env);
4568
4569    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4570}
4571
4572static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4573                                    uint64_t value)
4574{
4575    CPUState *cs = env_cpu(env);
4576    int mask = e2_tlbmask(env);
4577
4578    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
4579}
4580
4581static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4582                                    uint64_t value)
4583{
4584    CPUState *cs = env_cpu(env);
4585
4586    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
4587}
4588
4589static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4590                                 uint64_t value)
4591{
4592    /* Invalidate by VA, EL2
4593     * Currently handles both VAE2 and VALE2, since we don't support
4594     * flush-last-level-only.
4595     */
4596    CPUState *cs = env_cpu(env);
4597    int mask = e2_tlbmask(env);
4598    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4599
4600    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
4601}
4602
4603static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
4604                                 uint64_t value)
4605{
4606    /* Invalidate by VA, EL3
4607     * Currently handles both VAE3 and VALE3, since we don't support
4608     * flush-last-level-only.
4609     */
4610    ARMCPU *cpu = env_archcpu(env);
4611    CPUState *cs = CPU(cpu);
4612    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4613
4614    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
4615}
4616
4617static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4618                                   uint64_t value)
4619{
4620    CPUState *cs = env_cpu(env);
4621    int mask = vae1_tlbmask(env);
4622    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4623    int bits = vae1_tlbbits(env, pageaddr);
4624
4625    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4626}
4627
4628static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4629                                 uint64_t value)
4630{
4631    /* Invalidate by VA, EL1&0 (AArch64 version).
4632     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
4633     * since we don't support flush-for-specific-ASID-only or
4634     * flush-last-level-only.
4635     */
4636    CPUState *cs = env_cpu(env);
4637    int mask = vae1_tlbmask(env);
4638    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4639    int bits = vae1_tlbbits(env, pageaddr);
4640
4641    if (tlb_force_broadcast(env)) {
4642        tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
4643    } else {
4644        tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits);
4645    }
4646}
4647
4648static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4649                                   uint64_t value)
4650{
4651    CPUState *cs = env_cpu(env);
4652    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4653    int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);
4654
4655    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
4656                                                  ARMMMUIdxBit_E2, bits);
4657}
4658
4659static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4660                                   uint64_t value)
4661{
4662    CPUState *cs = env_cpu(env);
4663    uint64_t pageaddr = sextract64(value << 12, 0, 56);
4664    int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
4665
4666    tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
4667                                                  ARMMMUIdxBit_SE3, bits);
4668}
4669
4670static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
4671                                      bool isread)
4672{
4673    int cur_el = arm_current_el(env);
4674
4675    if (cur_el < 2) {
4676        uint64_t hcr = arm_hcr_el2_eff(env);
4677
4678        if (cur_el == 0) {
4679            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
4680                if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) {
4681                    return CP_ACCESS_TRAP_EL2;
4682                }
4683            } else {
4684                if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
4685                    return CP_ACCESS_TRAP;
4686                }
4687                if (hcr & HCR_TDZ) {
4688                    return CP_ACCESS_TRAP_EL2;
4689                }
4690            }
4691        } else if (hcr & HCR_TDZ) {
4692            return CP_ACCESS_TRAP_EL2;
4693        }
4694    }
4695    return CP_ACCESS_OK;
4696}
4697
4698static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
4699{
4700    ARMCPU *cpu = env_archcpu(env);
4701    int dzp_bit = 1 << 4;
4702
4703    /* DZP indicates whether DC ZVA access is allowed */
4704    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
4705        dzp_bit = 0;
4706    }
4707    return cpu->dcz_blocksize | dzp_bit;
4708}
4709
4710static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4711                                    bool isread)
4712{
4713    if (!(env->pstate & PSTATE_SP)) {
4714        /* Access to SP_EL0 is undefined if it's being used as
4715         * the stack pointer.
4716         */
4717        return CP_ACCESS_TRAP_UNCATEGORIZED;
4718    }
4719    return CP_ACCESS_OK;
4720}
4721
4722static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
4723{
4724    return env->pstate & PSTATE_SP;
4725}
4726
4727static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4728{
4729    update_spsel(env, val);
4730}
4731
4732static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4733                        uint64_t value)
4734{
4735    ARMCPU *cpu = env_archcpu(env);
4736
4737    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4738        /* M bit is RAZ/WI for PMSA with no MPU implemented */
4739        value &= ~SCTLR_M;
4740    }
4741
4742    /* ??? Lots of these bits are not implemented.  */
4743
4744    if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) {
4745        if (ri->opc1 == 6) { /* SCTLR_EL3 */
4746            value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA);
4747        } else {
4748            value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF |
4749                       SCTLR_ATA0 | SCTLR_ATA);
4750        }
4751    }
4752
4753    if (raw_read(env, ri) == value) {
4754        /* Skip the TLB flush if nothing actually changed; Linux likes
4755         * to do a lot of pointless SCTLR writes.
4756         */
4757        return;
4758    }
4759
4760    raw_write(env, ri, value);
4761
4762    /* This may enable/disable the MMU, so do a TLB flush.  */
4763    tlb_flush(CPU(cpu));
4764
4765    if (ri->type & ARM_CP_SUPPRESS_TB_END) {
4766        /*
4767         * Normally we would always end the TB on an SCTLR write; see the
4768         * comment in ARMCPRegInfo sctlr initialization below for why Xscale
4769         * is special.  Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
4770         * of hflags from the translator, so do it here.
4771         */
4772        arm_rebuild_hflags(env);
4773    }
4774}
4775
4776static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
4777                                     bool isread)
4778{
4779    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
4780        return CP_ACCESS_TRAP_FP_EL2;
4781    }
4782    if (env->cp15.cptr_el[3] & CPTR_TFP) {
4783        return CP_ACCESS_TRAP_FP_EL3;
4784    }
4785    return CP_ACCESS_OK;
4786}
4787
4788static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4789                       uint64_t value)
4790{
4791    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
4792}
4793
4794static const ARMCPRegInfo v8_cp_reginfo[] = {
4795    /* Minimal set of EL0-visible registers. This will need to be expanded
4796     * significantly for system emulation of AArch64 CPUs.
4797     */
4798    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
4799      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
4800      .access = PL0_RW, .type = ARM_CP_NZCV },
4801    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
4802      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
4803      .type = ARM_CP_NO_RAW,
4804      .access = PL0_RW, .accessfn = aa64_daif_access,
4805      .fieldoffset = offsetof(CPUARMState, daif),
4806      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
4807    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
4808      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
4809      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4810      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
4811    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
4812      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
4813      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4814      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
4815    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
4816      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
4817      .access = PL0_R, .type = ARM_CP_NO_RAW,
4818      .readfn = aa64_dczid_read },
4819    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
4820      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
4821      .access = PL0_W, .type = ARM_CP_DC_ZVA,
4822#ifndef CONFIG_USER_ONLY
4823      /* Avoid overhead of an access check that always passes in user-mode */
4824      .accessfn = aa64_zva_access,
4825#endif
4826    },
4827    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
4828      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
4829      .access = PL1_R, .type = ARM_CP_CURRENTEL },
4830    /* Cache ops: all NOPs since we don't emulate caches */
4831    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
4832      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4833      .access = PL1_W, .type = ARM_CP_NOP,
4834      .accessfn = aa64_cacheop_pou_access },
4835    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
4836      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4837      .access = PL1_W, .type = ARM_CP_NOP,
4838      .accessfn = aa64_cacheop_pou_access },
4839    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
4840      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
4841      .access = PL0_W, .type = ARM_CP_NOP,
4842      .accessfn = aa64_cacheop_pou_access },
4843    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
4844      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4845      .access = PL1_W, .accessfn = aa64_cacheop_poc_access,
4846      .type = ARM_CP_NOP },
4847    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
4848      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4849      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4850    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
4851      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
4852      .access = PL0_W, .type = ARM_CP_NOP,
4853      .accessfn = aa64_cacheop_poc_access },
4854    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
4855      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4856      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4857    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
4858      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
4859      .access = PL0_W, .type = ARM_CP_NOP,
4860      .accessfn = aa64_cacheop_pou_access },
4861    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
4862      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
4863      .access = PL0_W, .type = ARM_CP_NOP,
4864      .accessfn = aa64_cacheop_poc_access },
4865    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
4866      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4867      .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
4868    /* TLBI operations */
4869    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
4870      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
4871      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4872      .writefn = tlbi_aa64_vmalle1is_write },
4873    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
4874      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
4875      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4876      .writefn = tlbi_aa64_vae1is_write },
4877    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
4878      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
4879      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4880      .writefn = tlbi_aa64_vmalle1is_write },
4881    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
4882      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
4883      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4884      .writefn = tlbi_aa64_vae1is_write },
4885    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
4886      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4887      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4888      .writefn = tlbi_aa64_vae1is_write },
4889    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
4890      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4891      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4892      .writefn = tlbi_aa64_vae1is_write },
4893    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
4894      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
4895      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4896      .writefn = tlbi_aa64_vmalle1_write },
4897    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
4898      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
4899      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4900      .writefn = tlbi_aa64_vae1_write },
4901    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
4902      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
4903      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4904      .writefn = tlbi_aa64_vmalle1_write },
4905    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
4906      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
4907      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4908      .writefn = tlbi_aa64_vae1_write },
4909    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
4910      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4911      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4912      .writefn = tlbi_aa64_vae1_write },
4913    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
4914      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4915      .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW,
4916      .writefn = tlbi_aa64_vae1_write },
4917    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
4918      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4919      .access = PL2_W, .type = ARM_CP_NOP },
4920    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
4921      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4922      .access = PL2_W, .type = ARM_CP_NOP },
4923    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
4924      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4925      .access = PL2_W, .type = ARM_CP_NO_RAW,
4926      .writefn = tlbi_aa64_alle1is_write },
4927    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
4928      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
4929      .access = PL2_W, .type = ARM_CP_NO_RAW,
4930      .writefn = tlbi_aa64_alle1is_write },
4931    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
4932      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4933      .access = PL2_W, .type = ARM_CP_NOP },
4934    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
4935      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4936      .access = PL2_W, .type = ARM_CP_NOP },
4937    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
4938      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4939      .access = PL2_W, .type = ARM_CP_NO_RAW,
4940      .writefn = tlbi_aa64_alle1_write },
4941    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
4942      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
4943      .access = PL2_W, .type = ARM_CP_NO_RAW,
4944      .writefn = tlbi_aa64_alle1is_write },
4945#ifndef CONFIG_USER_ONLY
4946    /* 64 bit address translation operations */
4947    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
4948      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
4949      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4950      .writefn = ats_write64 },
4951    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
4952      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
4953      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4954      .writefn = ats_write64 },
4955    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
4956      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
4957      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4958      .writefn = ats_write64 },
4959    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
4960      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
4961      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4962      .writefn = ats_write64 },
4963    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
4964      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
4965      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4966      .writefn = ats_write64 },
4967    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
4968      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
4969      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4970      .writefn = ats_write64 },
4971    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
4972      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
4973      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4974      .writefn = ats_write64 },
4975    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
4976      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
4977      .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4978      .writefn = ats_write64 },
4979    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
4980    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
4981      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
4982      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4983      .writefn = ats_write64 },
4984    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
4985      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
4986      .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
4987      .writefn = ats_write64 },
4988    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
4989      .type = ARM_CP_ALIAS,
4990      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
4991      .access = PL1_RW, .resetvalue = 0,
4992      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
4993      .writefn = par_write },
4994#endif
4995    /* TLB invalidate last level of translation table walk */
4996    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4997      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
4998      .writefn = tlbimva_is_write },
4999    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
5000      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5001      .writefn = tlbimvaa_is_write },
5002    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
5003      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5004      .writefn = tlbimva_write },
5005    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
5006      .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb,
5007      .writefn = tlbimvaa_write },
5008    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5009      .type = ARM_CP_NO_RAW, .access = PL2_W,
5010      .writefn = tlbimva_hyp_write },
5011    { .name = "TLBIMVALHIS",
5012      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5013      .type = ARM_CP_NO_RAW, .access = PL2_W,
5014      .writefn = tlbimva_hyp_is_write },
5015    { .name = "TLBIIPAS2",
5016      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
5017      .type = ARM_CP_NOP, .access = PL2_W },
5018    { .name = "TLBIIPAS2IS",
5019      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
5020      .type = ARM_CP_NOP, .access = PL2_W },
5021    { .name = "TLBIIPAS2L",
5022      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
5023      .type = ARM_CP_NOP, .access = PL2_W },
5024    { .name = "TLBIIPAS2LIS",
5025      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
5026      .type = ARM_CP_NOP, .access = PL2_W },
5027    /* 32 bit cache operations */
5028    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
5029      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5030    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
5031      .type = ARM_CP_NOP, .access = PL1_W },
5032    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
5033      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5034    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
5035      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5036    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
5037      .type = ARM_CP_NOP, .access = PL1_W },
5038    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
5039      .type = ARM_CP_NOP, .access = PL1_W },
5040    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
5041      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5042    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
5043      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5044    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
5045      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5046    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
5047      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5048    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
5049      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_pou_access },
5050    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
5051      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access },
5052    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
5053      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
5054    /* MMU Domain access control / MPU write buffer control */
5055    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
5056      .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0,
5057      .writefn = dacr_write, .raw_writefn = raw_write,
5058      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
5059                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
5060    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
5061      .type = ARM_CP_ALIAS,
5062      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
5063      .access = PL1_RW,
5064      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
5065    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
5066      .type = ARM_CP_ALIAS,
5067      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
5068      .access = PL1_RW,
5069      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
5070    /* We rely on the access checks not allowing the guest to write to the
5071     * state field when SPSel indicates that it's being used as the stack
5072     * pointer.
5073     */
5074    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
5075      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
5076      .access = PL1_RW, .accessfn = sp_el0_access,
5077      .type = ARM_CP_ALIAS,
5078      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
5079    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
5080      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
5081      .access = PL2_RW, .type = ARM_CP_ALIAS,
5082      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
5083    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
5084      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
5085      .type = ARM_CP_NO_RAW,
5086      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
5087    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
5088      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
5089      .type = ARM_CP_ALIAS,
5090      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
5091      .access = PL2_RW, .accessfn = fpexc32_access },
5092    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
5093      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
5094      .access = PL2_RW, .resetvalue = 0,
5095      .writefn = dacr_write, .raw_writefn = raw_write,
5096      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
5097    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
5098      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
5099      .access = PL2_RW, .resetvalue = 0,
5100      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
5101    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
5102      .type = ARM_CP_ALIAS,
5103      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
5104      .access = PL2_RW,
5105      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
5106    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
5107      .type = ARM_CP_ALIAS,
5108      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
5109      .access = PL2_RW,
5110      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
5111    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
5112      .type = ARM_CP_ALIAS,
5113      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
5114      .access = PL2_RW,
5115      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
5116    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
5117      .type = ARM_CP_ALIAS,
5118      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
5119      .access = PL2_RW,
5120      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
5121    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
5122      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
5123      .resetvalue = 0,
5124      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
5125    { .name = "SDCR", .type = ARM_CP_ALIAS,
5126      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
5127      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5128      .writefn = sdcr_write,
5129      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
5130    REGINFO_SENTINEL
5131};
5132
5133/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
5134static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
5135    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
5136      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
5137      .access = PL2_RW,
5138      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
5139    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
5140      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5141      .access = PL2_RW,
5142      .type = ARM_CP_CONST, .resetvalue = 0 },
5143    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
5144      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
5145      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5146    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
5147      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
5148      .access = PL2_RW,
5149      .type = ARM_CP_CONST, .resetvalue = 0 },
5150    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
5151      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
5152      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5153    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
5154      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
5155      .access = PL2_RW, .type = ARM_CP_CONST,
5156      .resetvalue = 0 },
5157    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
5158      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
5159      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5160    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
5161      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
5162      .access = PL2_RW, .type = ARM_CP_CONST,
5163      .resetvalue = 0 },
5164    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
5165      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
5166      .access = PL2_RW, .type = ARM_CP_CONST,
5167      .resetvalue = 0 },
5168    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
5169      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
5170      .access = PL2_RW, .type = ARM_CP_CONST,
5171      .resetvalue = 0 },
5172    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
5173      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
5174      .access = PL2_RW, .type = ARM_CP_CONST,
5175      .resetvalue = 0 },
5176    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
5177      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
5178      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5179    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
5180      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5181      .access = PL2_RW, .accessfn = access_el3_aa32ns,
5182      .type = ARM_CP_CONST, .resetvalue = 0 },
5183    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
5184      .cp = 15, .opc1 = 6, .crm = 2,
5185      .access = PL2_RW, .accessfn = access_el3_aa32ns,
5186      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
5187    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
5188      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
5189      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5190    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
5191      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
5192      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5193    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5194      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
5195      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5196    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
5197      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
5198      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5199    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
5200      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5201      .resetvalue = 0 },
5202    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
5203      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
5204      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5205    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
5206      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
5207      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5208    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
5209      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5210      .resetvalue = 0 },
5211    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5212      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
5213      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5214    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
5215      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
5216      .resetvalue = 0 },
5217    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5218      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
5219      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5220    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5221      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
5222      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5223    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
5224      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
5225      .access = PL2_RW, .accessfn = access_tda,
5226      .type = ARM_CP_CONST, .resetvalue = 0 },
5227    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
5228      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5229      .access = PL2_RW, .accessfn = access_el3_aa32ns,
5230      .type = ARM_CP_CONST, .resetvalue = 0 },
5231    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
5232      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
5233      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5234    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5235      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
5236      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5237    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5238      .type = ARM_CP_CONST,
5239      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
5240      .access = PL2_RW, .resetvalue = 0 },
5241    REGINFO_SENTINEL
5242};
5243
5244/* Ditto, but for registers which exist in ARMv8 but not v7 */
5245static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
5246    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
5247      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5248      .access = PL2_RW,
5249      .type = ARM_CP_CONST, .resetvalue = 0 },
5250    REGINFO_SENTINEL
5251};
5252
5253static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
5254{
5255    ARMCPU *cpu = env_archcpu(env);
5256
5257    if (arm_feature(env, ARM_FEATURE_V8)) {
5258        valid_mask |= MAKE_64BIT_MASK(0, 34);  /* ARMv8.0 */
5259    } else {
5260        valid_mask |= MAKE_64BIT_MASK(0, 28);  /* ARMv7VE */
5261    }
5262
5263    if (arm_feature(env, ARM_FEATURE_EL3)) {
5264        valid_mask &= ~HCR_HCD;
5265    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
5266        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
5267         * However, if we're using the SMC PSCI conduit then QEMU is
5268         * effectively acting like EL3 firmware and so the guest at
5269         * EL2 should retain the ability to prevent EL1 from being
5270         * able to make SMC calls into the ersatz firmware, so in
5271         * that case HCR.TSC should be read/write.
5272         */
5273        valid_mask &= ~HCR_TSC;
5274    }
5275
5276    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
5277        if (cpu_isar_feature(aa64_vh, cpu)) {
5278            valid_mask |= HCR_E2H;
5279        }
5280        if (cpu_isar_feature(aa64_lor, cpu)) {
5281            valid_mask |= HCR_TLOR;
5282        }
5283        if (cpu_isar_feature(aa64_pauth, cpu)) {
5284            valid_mask |= HCR_API | HCR_APK;
5285        }
5286        if (cpu_isar_feature(aa64_mte, cpu)) {
5287            valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5;
5288        }
5289    }
5290
5291    /* Clear RES0 bits.  */
5292    value &= valid_mask;
5293
5294    /*
5295     * These bits change the MMU setup:
5296     * HCR_VM enables stage 2 translation
5297     * HCR_PTW forbids certain page-table setups
5298     * HCR_DC disables stage1 and enables stage2 translation
5299     * HCR_DCT enables tagging on (disabled) stage1 translation
5300     */
5301    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) {
5302        tlb_flush(CPU(cpu));
5303    }
5304    env->cp15.hcr_el2 = value;
5305
5306    /*
5307     * Updates to VI and VF require us to update the status of
5308     * virtual interrupts, which are the logical OR of these bits
5309     * and the state of the input lines from the GIC. (This requires
5310     * that we have the iothread lock, which is done by marking the
5311     * reginfo structs as ARM_CP_IO.)
5312     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
5313     * possible for it to be taken immediately, because VIRQ and
5314     * VFIQ are masked unless running at EL0 or EL1, and HCR
5315     * can only be written at EL2.
5316     */
5317    g_assert(qemu_mutex_iothread_locked());
5318    arm_cpu_update_virq(cpu);
5319    arm_cpu_update_vfiq(cpu);
5320}
5321
5322static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
5323{
5324    do_hcr_write(env, value, 0);
5325}
5326
5327static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
5328                          uint64_t value)
5329{
5330    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
5331    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
5332    do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32));
5333}
5334
5335static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
5336                         uint64_t value)
5337{
5338    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
5339    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
5340    do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32));
5341}
5342
5343/*
5344 * Return the effective value of HCR_EL2.
5345 * Bits that are not included here:
5346 * RW       (read from SCR_EL3.RW as needed)
5347 */
5348uint64_t arm_hcr_el2_eff(CPUARMState *env)
5349{
5350    uint64_t ret = env->cp15.hcr_el2;
5351
5352    if (arm_is_secure_below_el3(env)) {
5353        /*
5354         * "This register has no effect if EL2 is not enabled in the
5355         * current Security state".  This is ARMv8.4-SecEL2 speak for
5356         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
5357         *
5358         * Prior to that, the language was "In an implementation that
5359         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
5360         * as if this field is 0 for all purposes other than a direct
5361         * read or write access of HCR_EL2".  With lots of enumeration
5362         * on a per-field basis.  In current QEMU, this is condition
5363         * is arm_is_secure_below_el3.
5364         *
5365         * Since the v8.4 language applies to the entire register, and
5366         * appears to be backward compatible, use that.
5367         */
5368        return 0;
5369    }
5370
5371    /*
5372     * For a cpu that supports both aarch64 and aarch32, we can set bits
5373     * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32.
5374     * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32.
5375     */
5376    if (!arm_el_is_aa64(env, 2)) {
5377        uint64_t aa32_valid;
5378
5379        /*
5380         * These bits are up-to-date as of ARMv8.6.
5381         * For HCR, it's easiest to list just the 2 bits that are invalid.
5382         * For HCR2, list those that are valid.
5383         */
5384        aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ);
5385        aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE |
5386                       HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS);
5387        ret &= aa32_valid;
5388    }
5389
5390    if (ret & HCR_TGE) {
5391        /* These bits are up-to-date as of ARMv8.6.  */
5392        if (ret & HCR_E2H) {
5393            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
5394                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
5395                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
5396                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE |
5397                     HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT |
5398                     HCR_TTLBIS | HCR_TTLBOS | HCR_TID5);
5399        } else {
5400            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
5401        }
5402        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
5403                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
5404                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
5405                 HCR_TLOR);
5406    }
5407
5408    return ret;
5409}
5410
5411static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
5412                           uint64_t value)
5413{
5414    /*
5415     * For A-profile AArch32 EL3, if NSACR.CP10
5416     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5417     */
5418    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5419        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5420        value &= ~(0x3 << 10);
5421        value |= env->cp15.cptr_el[2] & (0x3 << 10);
5422    }
5423    env->cp15.cptr_el[2] = value;
5424}
5425
5426static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
5427{
5428    /*
5429     * For A-profile AArch32 EL3, if NSACR.CP10
5430     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
5431     */
5432    uint64_t value = env->cp15.cptr_el[2];
5433
5434    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
5435        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
5436        value |= 0x3 << 10;
5437    }
5438    return value;
5439}
5440
5441static const ARMCPRegInfo el2_cp_reginfo[] = {
5442    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
5443      .type = ARM_CP_IO,
5444      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5445      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5446      .writefn = hcr_write },
5447    { .name = "HCR", .state = ARM_CP_STATE_AA32,
5448      .type = ARM_CP_ALIAS | ARM_CP_IO,
5449      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
5450      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
5451      .writefn = hcr_writelow },
5452    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
5453      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
5454      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
5455    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
5456      .type = ARM_CP_ALIAS,
5457      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
5458      .access = PL2_RW,
5459      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
5460    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
5461      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
5462      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
5463    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
5464      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
5465      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
5466    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
5467      .type = ARM_CP_ALIAS,
5468      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
5469      .access = PL2_RW,
5470      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
5471    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
5472      .type = ARM_CP_ALIAS,
5473      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
5474      .access = PL2_RW,
5475      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
5476    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
5477      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
5478      .access = PL2_RW, .writefn = vbar_write,
5479      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
5480      .resetvalue = 0 },
5481    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
5482      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
5483      .access = PL3_RW, .type = ARM_CP_ALIAS,
5484      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
5485    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
5486      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
5487      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
5488      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
5489      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
5490    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
5491      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
5492      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
5493      .resetvalue = 0 },
5494    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
5495      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
5496      .access = PL2_RW, .type = ARM_CP_ALIAS,
5497      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
5498    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
5499      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
5500      .access = PL2_RW, .type = ARM_CP_CONST,
5501      .resetvalue = 0 },
5502    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
5503    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
5504      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
5505      .access = PL2_RW, .type = ARM_CP_CONST,
5506      .resetvalue = 0 },
5507    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
5508      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
5509      .access = PL2_RW, .type = ARM_CP_CONST,
5510      .resetvalue = 0 },
5511    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
5512      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
5513      .access = PL2_RW, .type = ARM_CP_CONST,
5514      .resetvalue = 0 },
5515    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
5516      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
5517      .access = PL2_RW, .writefn = vmsa_tcr_el12_write,
5518      /* no .raw_writefn or .resetfn needed as we never use mask/base_mask */
5519      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
5520    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
5521      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5522      .type = ARM_CP_ALIAS,
5523      .access = PL2_RW, .accessfn = access_el3_aa32ns,
5524      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
5525    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
5526      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
5527      .access = PL2_RW,
5528      /* no .writefn needed as this can't cause an ASID change;
5529       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
5530       */
5531      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
5532    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
5533      .cp = 15, .opc1 = 6, .crm = 2,
5534      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5535      .access = PL2_RW, .accessfn = access_el3_aa32ns,
5536      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
5537      .writefn = vttbr_write },
5538    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
5539      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
5540      .access = PL2_RW, .writefn = vttbr_write,
5541      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
5542    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
5543      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
5544      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
5545      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
5546    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
5547      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
5548      .access = PL2_RW, .resetvalue = 0,
5549      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
5550    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
5551      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
5552      .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write,
5553      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
5554    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
5555      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
5556      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
5557    { .name = "TLBIALLNSNH",
5558      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
5559      .type = ARM_CP_NO_RAW, .access = PL2_W,
5560      .writefn = tlbiall_nsnh_write },
5561    { .name = "TLBIALLNSNHIS",
5562      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
5563      .type = ARM_CP_NO_RAW, .access = PL2_W,
5564      .writefn = tlbiall_nsnh_is_write },
5565    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
5566      .type = ARM_CP_NO_RAW, .access = PL2_W,
5567      .writefn = tlbiall_hyp_write },
5568    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
5569      .type = ARM_CP_NO_RAW, .access = PL2_W,
5570      .writefn = tlbiall_hyp_is_write },
5571    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
5572      .type = ARM_CP_NO_RAW, .access = PL2_W,
5573      .writefn = tlbimva_hyp_write },
5574    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
5575      .type = ARM_CP_NO_RAW, .access = PL2_W,
5576      .writefn = tlbimva_hyp_is_write },
5577    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
5578      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
5579      .type = ARM_CP_NO_RAW, .access = PL2_W,
5580      .writefn = tlbi_aa64_alle2_write },
5581    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
5582      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
5583      .type = ARM_CP_NO_RAW, .access = PL2_W,
5584      .writefn = tlbi_aa64_vae2_write },
5585    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
5586      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
5587      .access = PL2_W, .type = ARM_CP_NO_RAW,
5588      .writefn = tlbi_aa64_vae2_write },
5589    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
5590      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
5591      .access = PL2_W, .type = ARM_CP_NO_RAW,
5592      .writefn = tlbi_aa64_alle2is_write },
5593    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
5594      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
5595      .type = ARM_CP_NO_RAW, .access = PL2_W,
5596      .writefn = tlbi_aa64_vae2is_write },
5597    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
5598      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
5599      .access = PL2_W, .type = ARM_CP_NO_RAW,
5600      .writefn = tlbi_aa64_vae2is_write },
5601#ifndef CONFIG_USER_ONLY
5602    /* Unlike the other EL2-related AT operations, these must
5603     * UNDEF from EL3 if EL2 is not implemented, which is why we
5604     * define them here rather than with the rest of the AT ops.
5605     */
5606    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
5607      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5608      .access = PL2_W, .accessfn = at_s1e2_access,
5609      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
5610    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
5611      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5612      .access = PL2_W, .accessfn = at_s1e2_access,
5613      .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
5614    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
5615     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
5616     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
5617     * to behave as if SCR.NS was 1.
5618     */
5619    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
5620      .access = PL2_W,
5621      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5622    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
5623      .access = PL2_W,
5624      .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
5625    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
5626      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
5627      /* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
5628       * reset values as IMPDEF. We choose to reset to 3 to comply with
5629       * both ARMv7 and ARMv8.
5630       */
5631      .access = PL2_RW, .resetvalue = 3,
5632      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
5633    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
5634      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
5635      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
5636      .writefn = gt_cntvoff_write,
5637      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5638    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
5639      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
5640      .writefn = gt_cntvoff_write,
5641      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
5642    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
5643      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
5644      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5645      .type = ARM_CP_IO, .access = PL2_RW,
5646      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5647    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
5648      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
5649      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
5650      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
5651    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
5652      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
5653      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
5654      .resetfn = gt_hyp_timer_reset,
5655      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
5656    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
5657      .type = ARM_CP_IO,
5658      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
5659      .access = PL2_RW,
5660      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
5661      .resetvalue = 0,
5662      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
5663#endif
5664    /* The only field of MDCR_EL2 that has a defined architectural reset value
5665     * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
5666     * don't implement any PMU event counters, so using zero as a reset
5667     * value for MDCR_EL2 is okay
5668     */
5669    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
5670      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
5671      .access = PL2_RW, .resetvalue = 0,
5672      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
5673    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
5674      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5675      .access = PL2_RW, .accessfn = access_el3_aa32ns,
5676      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5677    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
5678      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
5679      .access = PL2_RW,
5680      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
5681    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
5682      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
5683      .access = PL2_RW,
5684      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
5685    REGINFO_SENTINEL
5686};
5687
5688static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
5689    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
5690      .type = ARM_CP_ALIAS | ARM_CP_IO,
5691      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
5692      .access = PL2_RW,
5693      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
5694      .writefn = hcr_writehigh },
5695    REGINFO_SENTINEL
5696};
5697
5698static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
5699                                   bool isread)
5700{
5701    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
5702     * At Secure EL1 it traps to EL3.
5703     */
5704    if (arm_current_el(env) == 3) {
5705        return CP_ACCESS_OK;
5706    }
5707    if (arm_is_secure_below_el3(env)) {
5708        return CP_ACCESS_TRAP_EL3;
5709    }
5710    /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */
5711    if (isread) {
5712        return CP_ACCESS_OK;
5713    }
5714    return CP_ACCESS_TRAP_UNCATEGORIZED;
5715}
5716
5717static const ARMCPRegInfo el3_cp_reginfo[] = {
5718    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
5719      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
5720      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
5721      .resetvalue = 0, .writefn = scr_write },
5722    { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
5723      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
5724      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5725      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
5726      .writefn = scr_write },
5727    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
5728      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
5729      .access = PL3_RW, .resetvalue = 0,
5730      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
5731    { .name = "SDER",
5732      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
5733      .access = PL3_RW, .resetvalue = 0,
5734      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
5735    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
5736      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5737      .writefn = vbar_write, .resetvalue = 0,
5738      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
5739    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
5740      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
5741      .access = PL3_RW, .resetvalue = 0,
5742      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
5743    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
5744      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
5745      .access = PL3_RW,
5746      /* no .writefn needed as this can't cause an ASID change;
5747       * we must provide a .raw_writefn and .resetfn because we handle
5748       * reset and migration for the AArch32 TTBCR(S), which might be
5749       * using mask and base_mask.
5750       */
5751      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
5752      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
5753    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
5754      .type = ARM_CP_ALIAS,
5755      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
5756      .access = PL3_RW,
5757      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
5758    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
5759      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
5760      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
5761    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
5762      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
5763      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
5764    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
5765      .type = ARM_CP_ALIAS,
5766      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
5767      .access = PL3_RW,
5768      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
5769    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
5770      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
5771      .access = PL3_RW, .writefn = vbar_write,
5772      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
5773      .resetvalue = 0 },
5774    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
5775      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
5776      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
5777      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
5778    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
5779      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
5780      .access = PL3_RW, .resetvalue = 0,
5781      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
5782    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
5783      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
5784      .access = PL3_RW, .type = ARM_CP_CONST,
5785      .resetvalue = 0 },
5786    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
5787      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
5788      .access = PL3_RW, .type = ARM_CP_CONST,
5789      .resetvalue = 0 },
5790    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
5791      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
5792      .access = PL3_RW, .type = ARM_CP_CONST,
5793      .resetvalue = 0 },
5794    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
5795      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
5796      .access = PL3_W, .type = ARM_CP_NO_RAW,
5797      .writefn = tlbi_aa64_alle3is_write },
5798    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
5799      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
5800      .access = PL3_W, .type = ARM_CP_NO_RAW,
5801      .writefn = tlbi_aa64_vae3is_write },
5802    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
5803      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
5804      .access = PL3_W, .type = ARM_CP_NO_RAW,
5805      .writefn = tlbi_aa64_vae3is_write },
5806    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
5807      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
5808      .access = PL3_W, .type = ARM_CP_NO_RAW,
5809      .writefn = tlbi_aa64_alle3_write },
5810    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
5811      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
5812      .access = PL3_W, .type = ARM_CP_NO_RAW,
5813      .writefn = tlbi_aa64_vae3_write },
5814    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
5815      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
5816      .access = PL3_W, .type = ARM_CP_NO_RAW,
5817      .writefn = tlbi_aa64_vae3_write },
5818    REGINFO_SENTINEL
5819};
5820
5821#ifndef CONFIG_USER_ONLY
5822/* Test if system register redirection is to occur in the current state.  */
5823static bool redirect_for_e2h(CPUARMState *env)
5824{
5825    return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
5826}
5827
5828static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
5829{
5830    CPReadFn *readfn;
5831
5832    if (redirect_for_e2h(env)) {
5833        /* Switch to the saved EL2 version of the register.  */
5834        ri = ri->opaque;
5835        readfn = ri->readfn;
5836    } else {
5837        readfn = ri->orig_readfn;
5838    }
5839    if (readfn == NULL) {
5840        readfn = raw_read;
5841    }
5842    return readfn(env, ri);
5843}
5844
5845static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
5846                          uint64_t value)
5847{
5848    CPWriteFn *writefn;
5849
5850    if (redirect_for_e2h(env)) {
5851        /* Switch to the saved EL2 version of the register.  */
5852        ri = ri->opaque;
5853        writefn = ri->writefn;
5854    } else {
5855        writefn = ri->orig_writefn;
5856    }
5857    if (writefn == NULL) {
5858        writefn = raw_write;
5859    }
5860    writefn(env, ri, value);
5861}
5862
5863static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
5864{
5865    struct E2HAlias {
5866        uint32_t src_key, dst_key, new_key;
5867        const char *src_name, *dst_name, *new_name;
5868        bool (*feature)(const ARMISARegisters *id);
5869    };
5870
5871#define K(op0, op1, crn, crm, op2) \
5872    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
5873
5874    static const struct E2HAlias aliases[] = {
5875        { K(3, 0,  1, 0, 0), K(3, 4,  1, 0, 0), K(3, 5, 1, 0, 0),
5876          "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
5877        { K(3, 0,  1, 0, 2), K(3, 4,  1, 1, 2), K(3, 5, 1, 0, 2),
5878          "CPACR", "CPTR_EL2", "CPACR_EL12" },
5879        { K(3, 0,  2, 0, 0), K(3, 4,  2, 0, 0), K(3, 5, 2, 0, 0),
5880          "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
5881        { K(3, 0,  2, 0, 1), K(3, 4,  2, 0, 1), K(3, 5, 2, 0, 1),
5882          "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
5883        { K(3, 0,  2, 0, 2), K(3, 4,  2, 0, 2), K(3, 5, 2, 0, 2),
5884          "TCR_EL1", "TCR_EL2", "TCR_EL12" },
5885        { K(3, 0,  4, 0, 0), K(3, 4,  4, 0, 0), K(3, 5, 4, 0, 0),
5886          "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
5887        { K(3, 0,  4, 0, 1), K(3, 4,  4, 0, 1), K(3, 5, 4, 0, 1),
5888          "ELR_EL1", "ELR_EL2", "ELR_EL12" },
5889        { K(3, 0,  5, 1, 0), K(3, 4,  5, 1, 0), K(3, 5, 5, 1, 0),
5890          "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
5891        { K(3, 0,  5, 1, 1), K(3, 4,  5, 1, 1), K(3, 5, 5, 1, 1),
5892          "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
5893        { K(3, 0,  5, 2, 0), K(3, 4,  5, 2, 0), K(3, 5, 5, 2, 0),
5894          "ESR_EL1", "ESR_EL2", "ESR_EL12" },
5895        { K(3, 0,  6, 0, 0), K(3, 4,  6, 0, 0), K(3, 5, 6, 0, 0),
5896          "FAR_EL1", "FAR_EL2", "FAR_EL12" },
5897        { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
5898          "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
5899        { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
5900          "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
5901        { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
5902          "VBAR", "VBAR_EL2", "VBAR_EL12" },
5903        { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
5904          "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
5905        { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
5906          "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
5907
5908        /*
5909         * Note that redirection of ZCR is mentioned in the description
5910         * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
5911         * not in the summary table.
5912         */
5913        { K(3, 0,  1, 2, 0), K(3, 4,  1, 2, 0), K(3, 5, 1, 2, 0),
5914          "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
5915
5916        { K(3, 0,  5, 6, 0), K(3, 4,  5, 6, 0), K(3, 5, 5, 6, 0),
5917          "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
5918
5919        /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
5920        /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
5921    };
5922#undef K
5923
5924    size_t i;
5925
5926    for (i = 0; i < ARRAY_SIZE(aliases); i++) {
5927        const struct E2HAlias *a = &aliases[i];
5928        ARMCPRegInfo *src_reg, *dst_reg;
5929
5930        if (a->feature && !a->feature(&cpu->isar)) {
5931            continue;
5932        }
5933
5934        src_reg = g_hash_table_lookup(cpu->cp_regs, &a->src_key);
5935        dst_reg = g_hash_table_lookup(cpu->cp_regs, &a->dst_key);
5936        g_assert(src_reg != NULL);
5937        g_assert(dst_reg != NULL);
5938
5939        /* Cross-compare names to detect typos in the keys.  */
5940        g_assert(strcmp(src_reg->name, a->src_name) == 0);
5941        g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
5942
5943        /* None of the core system registers use opaque; we will.  */
5944        g_assert(src_reg->opaque == NULL);
5945
5946        /* Create alias before redirection so we dup the right data. */
5947        if (a->new_key) {
5948            ARMCPRegInfo *new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
5949            uint32_t *new_key = g_memdup(&a->new_key, sizeof(uint32_t));
5950            bool ok;
5951
5952            new_reg->name = a->new_name;
5953            new_reg->type |= ARM_CP_ALIAS;
5954            /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place.  */
5955            new_reg->access &= PL2_RW | PL3_RW;
5956
5957            ok = g_hash_table_insert(cpu->cp_regs, new_key, new_reg);
5958            g_assert(ok);
5959        }
5960
5961        src_reg->opaque = dst_reg;
5962        src_reg->orig_readfn = src_reg->readfn ?: raw_read;
5963        src_reg->orig_writefn = src_reg->writefn ?: raw_write;
5964        if (!src_reg->raw_readfn) {
5965            src_reg->raw_readfn = raw_read;
5966        }
5967        if (!src_reg->raw_writefn) {
5968            src_reg->raw_writefn = raw_write;
5969        }
5970        src_reg->readfn = el2_e2h_read;
5971        src_reg->writefn = el2_e2h_write;
5972    }
5973}
5974#endif
5975
5976static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
5977                                     bool isread)
5978{
5979    int cur_el = arm_current_el(env);
5980
5981    if (cur_el < 2) {
5982        uint64_t hcr = arm_hcr_el2_eff(env);
5983
5984        if (cur_el == 0) {
5985            if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) {
5986                if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) {
5987                    return CP_ACCESS_TRAP_EL2;
5988                }
5989            } else {
5990                if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
5991                    return CP_ACCESS_TRAP;
5992                }
5993                if (hcr & HCR_TID2) {
5994                    return CP_ACCESS_TRAP_EL2;
5995                }
5996            }
5997        } else if (hcr & HCR_TID2) {
5998            return CP_ACCESS_TRAP_EL2;
5999        }
6000    }
6001
6002    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
6003        return CP_ACCESS_TRAP_EL2;
6004    }
6005
6006    return CP_ACCESS_OK;
6007}
6008
6009static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
6010                        uint64_t value)
6011{
6012    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
6013     * read via a bit in OSLSR_EL1.
6014     */
6015    int oslock;
6016
6017    if (ri->state == ARM_CP_STATE_AA32) {
6018        oslock = (value == 0xC5ACCE55);
6019    } else {
6020        oslock = value & 1;
6021    }
6022
6023    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
6024}
6025
6026static const ARMCPRegInfo debug_cp_reginfo[] = {
6027    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
6028     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
6029     * unlike DBGDRAR it is never accessible from EL0.
6030     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
6031     * accessor.
6032     */
6033    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
6034      .access = PL0_R, .accessfn = access_tdra,
6035      .type = ARM_CP_CONST, .resetvalue = 0 },
6036    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
6037      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
6038      .access = PL1_R, .accessfn = access_tdra,
6039      .type = ARM_CP_CONST, .resetvalue = 0 },
6040    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
6041      .access = PL0_R, .accessfn = access_tdra,
6042      .type = ARM_CP_CONST, .resetvalue = 0 },
6043    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
6044    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
6045      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
6046      .access = PL1_RW, .accessfn = access_tda,
6047      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
6048      .resetvalue = 0 },
6049    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
6050     * We don't implement the configurable EL0 access.
6051     */
6052    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
6053      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
6054      .type = ARM_CP_ALIAS,
6055      .access = PL1_R, .accessfn = access_tda,
6056      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
6057    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
6058      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
6059      .access = PL1_W, .type = ARM_CP_NO_RAW,
6060      .accessfn = access_tdosa,
6061      .writefn = oslar_write },
6062    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
6063      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
6064      .access = PL1_R, .resetvalue = 10,
6065      .accessfn = access_tdosa,
6066      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
6067    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
6068    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
6069      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
6070      .access = PL1_RW, .accessfn = access_tdosa,
6071      .type = ARM_CP_NOP },
6072    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
6073     * implement vector catch debug events yet.
6074     */
6075    { .name = "DBGVCR",
6076      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
6077      .access = PL1_RW, .accessfn = access_tda,
6078      .type = ARM_CP_NOP },
6079    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
6080     * to save and restore a 32-bit guest's DBGVCR)
6081     */
6082    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
6083      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
6084      .access = PL2_RW, .accessfn = access_tda,
6085      .type = ARM_CP_NOP },
6086    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
6087     * Channel but Linux may try to access this register. The 32-bit
6088     * alias is DBGDCCINT.
6089     */
6090    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
6091      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
6092      .access = PL1_RW, .accessfn = access_tda,
6093      .type = ARM_CP_NOP },
6094    REGINFO_SENTINEL
6095};
6096
6097static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
6098    /* 64 bit access versions of the (dummy) debug registers */
6099    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
6100      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
6101    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
6102      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
6103    REGINFO_SENTINEL
6104};
6105
6106/* Return the exception level to which exceptions should be taken
6107 * via SVEAccessTrap.  If an exception should be routed through
6108 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
6109 * take care of raising that exception.
6110 * C.f. the ARM pseudocode function CheckSVEEnabled.
6111 */
6112int sve_exception_el(CPUARMState *env, int el)
6113{
6114#ifndef CONFIG_USER_ONLY
6115    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
6116
6117    if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
6118        bool disabled = false;
6119
6120        /* The CPACR.ZEN controls traps to EL1:
6121         * 0, 2 : trap EL0 and EL1 accesses
6122         * 1    : trap only EL0 accesses
6123         * 3    : trap no accesses
6124         */
6125        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
6126            disabled = true;
6127        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
6128            disabled = el == 0;
6129        }
6130        if (disabled) {
6131            /* route_to_el2 */
6132            return hcr_el2 & HCR_TGE ? 2 : 1;
6133        }
6134
6135        /* Check CPACR.FPEN.  */
6136        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
6137            disabled = true;
6138        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
6139            disabled = el == 0;
6140        }
6141        if (disabled) {
6142            return 0;
6143        }
6144    }
6145
6146    /* CPTR_EL2.  Since TZ and TFP are positive,
6147     * they will be zero when EL2 is not present.
6148     */
6149    if (el <= 2 && !arm_is_secure_below_el3(env)) {
6150        if (env->cp15.cptr_el[2] & CPTR_TZ) {
6151            return 2;
6152        }
6153        if (env->cp15.cptr_el[2] & CPTR_TFP) {
6154            return 0;
6155        }
6156    }
6157
6158    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
6159    if (arm_feature(env, ARM_FEATURE_EL3)
6160        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
6161        return 3;
6162    }
6163#endif
6164    return 0;
6165}
6166
6167static uint32_t sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len)
6168{
6169    uint32_t end_len;
6170
6171    end_len = start_len &= 0xf;
6172    if (!test_bit(start_len, cpu->sve_vq_map)) {
6173        end_len = find_last_bit(cpu->sve_vq_map, start_len);
6174        assert(end_len < start_len);
6175    }
6176    return end_len;
6177}
6178
6179/*
6180 * Given that SVE is enabled, return the vector length for EL.
6181 */
6182uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
6183{
6184    ARMCPU *cpu = env_archcpu(env);
6185    uint32_t zcr_len = cpu->sve_max_vq - 1;
6186
6187    if (el <= 1) {
6188        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
6189    }
6190    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
6191        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
6192    }
6193    if (arm_feature(env, ARM_FEATURE_EL3)) {
6194        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
6195    }
6196
6197    return sve_zcr_get_valid_len(cpu, zcr_len);
6198}
6199
6200static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6201                      uint64_t value)
6202{
6203    int cur_el = arm_current_el(env);
6204    int old_len = sve_zcr_len_for_el(env, cur_el);
6205    int new_len;
6206
6207    /* Bits other than [3:0] are RAZ/WI.  */
6208    QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
6209    raw_write(env, ri, value & 0xf);
6210
6211    /*
6212     * Because we arrived here, we know both FP and SVE are enabled;
6213     * otherwise we would have trapped access to the ZCR_ELn register.
6214     */
6215    new_len = sve_zcr_len_for_el(env, cur_el);
6216    if (new_len < old_len) {
6217        aarch64_sve_narrow_vq(env, new_len + 1);
6218    }
6219}
6220
6221static const ARMCPRegInfo zcr_el1_reginfo = {
6222    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
6223    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
6224    .access = PL1_RW, .type = ARM_CP_SVE,
6225    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
6226    .writefn = zcr_write, .raw_writefn = raw_write
6227};
6228
6229static const ARMCPRegInfo zcr_el2_reginfo = {
6230    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6231    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6232    .access = PL2_RW, .type = ARM_CP_SVE,
6233    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
6234    .writefn = zcr_write, .raw_writefn = raw_write
6235};
6236
6237static const ARMCPRegInfo zcr_no_el2_reginfo = {
6238    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
6239    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
6240    .access = PL2_RW, .type = ARM_CP_SVE,
6241    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
6242};
6243
6244static const ARMCPRegInfo zcr_el3_reginfo = {
6245    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
6246    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
6247    .access = PL3_RW, .type = ARM_CP_SVE,
6248    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
6249    .writefn = zcr_write, .raw_writefn = raw_write
6250};
6251
6252void hw_watchpoint_update(ARMCPU *cpu, int n)
6253{
6254    CPUARMState *env = &cpu->env;
6255    vaddr len = 0;
6256    vaddr wvr = env->cp15.dbgwvr[n];
6257    uint64_t wcr = env->cp15.dbgwcr[n];
6258    int mask;
6259    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
6260
6261    if (env->cpu_watchpoint[n]) {
6262        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
6263        env->cpu_watchpoint[n] = NULL;
6264    }
6265
6266    if (!extract64(wcr, 0, 1)) {
6267        /* E bit clear : watchpoint disabled */
6268        return;
6269    }
6270
6271    switch (extract64(wcr, 3, 2)) {
6272    case 0:
6273        /* LSC 00 is reserved and must behave as if the wp is disabled */
6274        return;
6275    case 1:
6276        flags |= BP_MEM_READ;
6277        break;
6278    case 2:
6279        flags |= BP_MEM_WRITE;
6280        break;
6281    case 3:
6282        flags |= BP_MEM_ACCESS;
6283        break;
6284    }
6285
6286    /* Attempts to use both MASK and BAS fields simultaneously are
6287     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
6288     * thus generating a watchpoint for every byte in the masked region.
6289     */
6290    mask = extract64(wcr, 24, 4);
6291    if (mask == 1 || mask == 2) {
6292        /* Reserved values of MASK; we must act as if the mask value was
6293         * some non-reserved value, or as if the watchpoint were disabled.
6294         * We choose the latter.
6295         */
6296        return;
6297    } else if (mask) {
6298        /* Watchpoint covers an aligned area up to 2GB in size */
6299        len = 1ULL << mask;
6300        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
6301         * whether the watchpoint fires when the unmasked bits match; we opt
6302         * to generate the exceptions.
6303         */
6304        wvr &= ~(len - 1);
6305    } else {
6306        /* Watchpoint covers bytes defined by the byte address select bits */
6307        int bas = extract64(wcr, 5, 8);
6308        int basstart;
6309
6310        if (extract64(wvr, 2, 1)) {
6311            /* Deprecated case of an only 4-aligned address. BAS[7:4] are
6312             * ignored, and BAS[3:0] define which bytes to watch.
6313             */
6314            bas &= 0xf;
6315        }
6316
6317        if (bas == 0) {
6318            /* This must act as if the watchpoint is disabled */
6319            return;
6320        }
6321
6322        /* The BAS bits are supposed to be programmed to indicate a contiguous
6323         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
6324         * we fire for each byte in the word/doubleword addressed by the WVR.
6325         * We choose to ignore any non-zero bits after the first range of 1s.
6326         */
6327        basstart = ctz32(bas);
6328        len = cto32(bas >> basstart);
6329        wvr += basstart;
6330    }
6331
6332    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
6333                          &env->cpu_watchpoint[n]);
6334}
6335
6336void hw_watchpoint_update_all(ARMCPU *cpu)
6337{
6338    int i;
6339    CPUARMState *env = &cpu->env;
6340
6341    /* Completely clear out existing QEMU watchpoints and our array, to
6342     * avoid possible stale entries following migration load.
6343     */
6344    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
6345    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
6346
6347    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
6348        hw_watchpoint_update(cpu, i);
6349    }
6350}
6351
6352static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6353                         uint64_t value)
6354{
6355    ARMCPU *cpu = env_archcpu(env);
6356    int i = ri->crm;
6357
6358    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
6359     * register reads and behaves as if values written are sign extended.
6360     * Bits [1:0] are RES0.
6361     */
6362    value = sextract64(value, 0, 49) & ~3ULL;
6363
6364    raw_write(env, ri, value);
6365    hw_watchpoint_update(cpu, i);
6366}
6367
6368static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6369                         uint64_t value)
6370{
6371    ARMCPU *cpu = env_archcpu(env);
6372    int i = ri->crm;
6373
6374    raw_write(env, ri, value);
6375    hw_watchpoint_update(cpu, i);
6376}
6377
6378void hw_breakpoint_update(ARMCPU *cpu, int n)
6379{
6380    CPUARMState *env = &cpu->env;
6381    uint64_t bvr = env->cp15.dbgbvr[n];
6382    uint64_t bcr = env->cp15.dbgbcr[n];
6383    vaddr addr;
6384    int bt;
6385    int flags = BP_CPU;
6386
6387    if (env->cpu_breakpoint[n]) {
6388        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
6389        env->cpu_breakpoint[n] = NULL;
6390    }
6391
6392    if (!extract64(bcr, 0, 1)) {
6393        /* E bit clear : watchpoint disabled */
6394        return;
6395    }
6396
6397    bt = extract64(bcr, 20, 4);
6398
6399    switch (bt) {
6400    case 4: /* unlinked address mismatch (reserved if AArch64) */
6401    case 5: /* linked address mismatch (reserved if AArch64) */
6402        qemu_log_mask(LOG_UNIMP,
6403                      "arm: address mismatch breakpoint types not implemented\n");
6404        return;
6405    case 0: /* unlinked address match */
6406    case 1: /* linked address match */
6407    {
6408        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
6409         * we behave as if the register was sign extended. Bits [1:0] are
6410         * RES0. The BAS field is used to allow setting breakpoints on 16
6411         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
6412         * a bp will fire if the addresses covered by the bp and the addresses
6413         * covered by the insn overlap but the insn doesn't start at the
6414         * start of the bp address range. We choose to require the insn and
6415         * the bp to have the same address. The constraints on writing to
6416         * BAS enforced in dbgbcr_write mean we have only four cases:
6417         *  0b0000  => no breakpoint
6418         *  0b0011  => breakpoint on addr
6419         *  0b1100  => breakpoint on addr + 2
6420         *  0b1111  => breakpoint on addr
6421         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
6422         */
6423        int bas = extract64(bcr, 5, 4);
6424        addr = sextract64(bvr, 0, 49) & ~3ULL;
6425        if (bas == 0) {
6426            return;
6427        }
6428        if (bas == 0xc) {
6429            addr += 2;
6430        }
6431        break;
6432    }
6433    case 2: /* unlinked context ID match */
6434    case 8: /* unlinked VMID match (reserved if no EL2) */
6435    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
6436        qemu_log_mask(LOG_UNIMP,
6437                      "arm: unlinked context breakpoint types not implemented\n");
6438        return;
6439    case 9: /* linked VMID match (reserved if no EL2) */
6440    case 11: /* linked context ID and VMID match (reserved if no EL2) */
6441    case 3: /* linked context ID match */
6442    default:
6443        /* We must generate no events for Linked context matches (unless
6444         * they are linked to by some other bp/wp, which is handled in
6445         * updates for the linking bp/wp). We choose to also generate no events
6446         * for reserved values.
6447         */
6448        return;
6449    }
6450
6451    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
6452}
6453
6454void hw_breakpoint_update_all(ARMCPU *cpu)
6455{
6456    int i;
6457    CPUARMState *env = &cpu->env;
6458
6459    /* Completely clear out existing QEMU breakpoints and our array, to
6460     * avoid possible stale entries following migration load.
6461     */
6462    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
6463    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
6464
6465    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
6466        hw_breakpoint_update(cpu, i);
6467    }
6468}
6469
6470static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6471                         uint64_t value)
6472{
6473    ARMCPU *cpu = env_archcpu(env);
6474    int i = ri->crm;
6475
6476    raw_write(env, ri, value);
6477    hw_breakpoint_update(cpu, i);
6478}
6479
6480static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
6481                         uint64_t value)
6482{
6483    ARMCPU *cpu = env_archcpu(env);
6484    int i = ri->crm;
6485
6486    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
6487     * copy of BAS[0].
6488     */
6489    value = deposit64(value, 6, 1, extract64(value, 5, 1));
6490    value = deposit64(value, 8, 1, extract64(value, 7, 1));
6491
6492    raw_write(env, ri, value);
6493    hw_breakpoint_update(cpu, i);
6494}
6495
6496static void define_debug_regs(ARMCPU *cpu)
6497{
6498    /* Define v7 and v8 architectural debug registers.
6499     * These are just dummy implementations for now.
6500     */
6501    int i;
6502    int wrps, brps, ctx_cmps;
6503    ARMCPRegInfo dbgdidr = {
6504        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
6505        .access = PL0_R, .accessfn = access_tda,
6506        .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr,
6507    };
6508
6509    /* Note that all these register fields hold "number of Xs minus 1". */
6510    brps = arm_num_brps(cpu);
6511    wrps = arm_num_wrps(cpu);
6512    ctx_cmps = arm_num_ctx_cmps(cpu);
6513
6514    assert(ctx_cmps <= brps);
6515
6516    define_one_arm_cp_reg(cpu, &dbgdidr);
6517    define_arm_cp_regs(cpu, debug_cp_reginfo);
6518
6519    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
6520        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
6521    }
6522
6523    for (i = 0; i < brps; i++) {
6524        ARMCPRegInfo dbgregs[] = {
6525            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
6526              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
6527              .access = PL1_RW, .accessfn = access_tda,
6528              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
6529              .writefn = dbgbvr_write, .raw_writefn = raw_write
6530            },
6531            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
6532              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
6533              .access = PL1_RW, .accessfn = access_tda,
6534              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
6535              .writefn = dbgbcr_write, .raw_writefn = raw_write
6536            },
6537            REGINFO_SENTINEL
6538        };
6539        define_arm_cp_regs(cpu, dbgregs);
6540    }
6541
6542    for (i = 0; i < wrps; i++) {
6543        ARMCPRegInfo dbgregs[] = {
6544            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
6545              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
6546              .access = PL1_RW, .accessfn = access_tda,
6547              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
6548              .writefn = dbgwvr_write, .raw_writefn = raw_write
6549            },
6550            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
6551              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
6552              .access = PL1_RW, .accessfn = access_tda,
6553              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
6554              .writefn = dbgwcr_write, .raw_writefn = raw_write
6555            },
6556            REGINFO_SENTINEL
6557        };
6558        define_arm_cp_regs(cpu, dbgregs);
6559    }
6560}
6561
6562static void define_pmu_regs(ARMCPU *cpu)
6563{
6564    /*
6565     * v7 performance monitor control register: same implementor
6566     * field as main ID register, and we implement four counters in
6567     * addition to the cycle count register.
6568     */
6569    unsigned int i, pmcrn = 4;
6570    ARMCPRegInfo pmcr = {
6571        .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
6572        .access = PL0_RW,
6573        .type = ARM_CP_IO | ARM_CP_ALIAS,
6574        .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
6575        .accessfn = pmreg_access, .writefn = pmcr_write,
6576        .raw_writefn = raw_write,
6577    };
6578    ARMCPRegInfo pmcr64 = {
6579        .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
6580        .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
6581        .access = PL0_RW, .accessfn = pmreg_access,
6582        .type = ARM_CP_IO,
6583        .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
6584        .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
6585                      PMCRLC,
6586        .writefn = pmcr_write, .raw_writefn = raw_write,
6587    };
6588    define_one_arm_cp_reg(cpu, &pmcr);
6589    define_one_arm_cp_reg(cpu, &pmcr64);
6590    for (i = 0; i < pmcrn; i++) {
6591        char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
6592        char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
6593        char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
6594        char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
6595        ARMCPRegInfo pmev_regs[] = {
6596            { .name = pmevcntr_name, .cp = 15, .crn = 14,
6597              .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6598              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6599              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6600              .accessfn = pmreg_access },
6601            { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
6602              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
6603              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6604              .type = ARM_CP_IO,
6605              .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6606              .raw_readfn = pmevcntr_rawread,
6607              .raw_writefn = pmevcntr_rawwrite },
6608            { .name = pmevtyper_name, .cp = 15, .crn = 14,
6609              .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6610              .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6611              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6612              .accessfn = pmreg_access },
6613            { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
6614              .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
6615              .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6616              .type = ARM_CP_IO,
6617              .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6618              .raw_writefn = pmevtyper_rawwrite },
6619            REGINFO_SENTINEL
6620        };
6621        define_arm_cp_regs(cpu, pmev_regs);
6622        g_free(pmevcntr_name);
6623        g_free(pmevcntr_el0_name);
6624        g_free(pmevtyper_name);
6625        g_free(pmevtyper_el0_name);
6626    }
6627    if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
6628        ARMCPRegInfo v81_pmu_regs[] = {
6629            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
6630              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
6631              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6632              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
6633            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
6634              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
6635              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6636              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
6637            REGINFO_SENTINEL
6638        };
6639        define_arm_cp_regs(cpu, v81_pmu_regs);
6640    }
6641    if (cpu_isar_feature(any_pmu_8_4, cpu)) {
6642        static const ARMCPRegInfo v84_pmmir = {
6643            .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
6644            .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
6645            .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6646            .resetvalue = 0
6647        };
6648        define_one_arm_cp_reg(cpu, &v84_pmmir);
6649    }
6650}
6651
6652/* We don't know until after realize whether there's a GICv3
6653 * attached, and that is what registers the gicv3 sysregs.
6654 * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1
6655 * at runtime.
6656 */
6657static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
6658{
6659    ARMCPU *cpu = env_archcpu(env);
6660    uint64_t pfr1 = cpu->isar.id_pfr1;
6661
6662    if (env->gicv3state) {
6663        pfr1 |= 1 << 28;
6664    }
6665    return pfr1;
6666}
6667
6668#ifndef CONFIG_USER_ONLY
6669static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
6670{
6671    ARMCPU *cpu = env_archcpu(env);
6672    uint64_t pfr0 = cpu->isar.id_aa64pfr0;
6673
6674    if (env->gicv3state) {
6675        pfr0 |= 1 << 24;
6676    }
6677    return pfr0;
6678}
6679#endif
6680
6681/* Shared logic between LORID and the rest of the LOR* registers.
6682 * Secure state exclusion has already been dealt with.
6683 */
6684static CPAccessResult access_lor_ns(CPUARMState *env,
6685                                    const ARMCPRegInfo *ri, bool isread)
6686{
6687    int el = arm_current_el(env);
6688
6689    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
6690        return CP_ACCESS_TRAP_EL2;
6691    }
6692    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
6693        return CP_ACCESS_TRAP_EL3;
6694    }
6695    return CP_ACCESS_OK;
6696}
6697
6698static CPAccessResult access_lor_other(CPUARMState *env,
6699                                       const ARMCPRegInfo *ri, bool isread)
6700{
6701    if (arm_is_secure_below_el3(env)) {
6702        /* Access denied in secure mode.  */
6703        return CP_ACCESS_TRAP;
6704    }
6705    return access_lor_ns(env, ri, isread);
6706}
6707
6708/*
6709 * A trivial implementation of ARMv8.1-LOR leaves all of these
6710 * registers fixed at 0, which indicates that there are zero
6711 * supported Limited Ordering regions.
6712 */
6713static const ARMCPRegInfo lor_reginfo[] = {
6714    { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
6715      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
6716      .access = PL1_RW, .accessfn = access_lor_other,
6717      .type = ARM_CP_CONST, .resetvalue = 0 },
6718    { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
6719      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
6720      .access = PL1_RW, .accessfn = access_lor_other,
6721      .type = ARM_CP_CONST, .resetvalue = 0 },
6722    { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
6723      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
6724      .access = PL1_RW, .accessfn = access_lor_other,
6725      .type = ARM_CP_CONST, .resetvalue = 0 },
6726    { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
6727      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
6728      .access = PL1_RW, .accessfn = access_lor_other,
6729      .type = ARM_CP_CONST, .resetvalue = 0 },
6730    { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
6731      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
6732      .access = PL1_R, .accessfn = access_lor_ns,
6733      .type = ARM_CP_CONST, .resetvalue = 0 },
6734    REGINFO_SENTINEL
6735};
6736
6737#ifdef TARGET_AARCH64
6738static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
6739                                   bool isread)
6740{
6741    int el = arm_current_el(env);
6742
6743    if (el < 2 &&
6744        arm_feature(env, ARM_FEATURE_EL2) &&
6745        !(arm_hcr_el2_eff(env) & HCR_APK)) {
6746        return CP_ACCESS_TRAP_EL2;
6747    }
6748    if (el < 3 &&
6749        arm_feature(env, ARM_FEATURE_EL3) &&
6750        !(env->cp15.scr_el3 & SCR_APK)) {
6751        return CP_ACCESS_TRAP_EL3;
6752    }
6753    return CP_ACCESS_OK;
6754}
6755
6756static const ARMCPRegInfo pauth_reginfo[] = {
6757    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6758      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
6759      .access = PL1_RW, .accessfn = access_pauth,
6760      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
6761    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6762      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
6763      .access = PL1_RW, .accessfn = access_pauth,
6764      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
6765    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6766      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
6767      .access = PL1_RW, .accessfn = access_pauth,
6768      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
6769    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6770      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
6771      .access = PL1_RW, .accessfn = access_pauth,
6772      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
6773    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6774      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
6775      .access = PL1_RW, .accessfn = access_pauth,
6776      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
6777    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6778      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
6779      .access = PL1_RW, .accessfn = access_pauth,
6780      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
6781    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6782      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
6783      .access = PL1_RW, .accessfn = access_pauth,
6784      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
6785    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6786      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
6787      .access = PL1_RW, .accessfn = access_pauth,
6788      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
6789    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
6790      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
6791      .access = PL1_RW, .accessfn = access_pauth,
6792      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
6793    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
6794      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
6795      .access = PL1_RW, .accessfn = access_pauth,
6796      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
6797    REGINFO_SENTINEL
6798};
6799
6800static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
6801{
6802    Error *err = NULL;
6803    uint64_t ret;
6804
6805    /* Success sets NZCV = 0000.  */
6806    env->NF = env->CF = env->VF = 0, env->ZF = 1;
6807
6808    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
6809        /*
6810         * ??? Failed, for unknown reasons in the crypto subsystem.
6811         * The best we can do is log the reason and return the
6812         * timed-out indication to the guest.  There is no reason
6813         * we know to expect this failure to be transitory, so the
6814         * guest may well hang retrying the operation.
6815         */
6816        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
6817                      ri->name, error_get_pretty(err));
6818        error_free(err);
6819
6820        env->ZF = 0; /* NZCF = 0100 */
6821        return 0;
6822    }
6823    return ret;
6824}
6825
6826/* We do not support re-seeding, so the two registers operate the same.  */
6827static const ARMCPRegInfo rndr_reginfo[] = {
6828    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
6829      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
6830      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
6831      .access = PL0_R, .readfn = rndr_readfn },
6832    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
6833      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
6834      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
6835      .access = PL0_R, .readfn = rndr_readfn },
6836    REGINFO_SENTINEL
6837};
6838
6839#ifndef CONFIG_USER_ONLY
6840static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
6841                          uint64_t value)
6842{
6843    ARMCPU *cpu = env_archcpu(env);
6844    /* CTR_EL0 System register -> DminLine, bits [19:16] */
6845    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
6846    uint64_t vaddr_in = (uint64_t) value;
6847    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
6848    void *haddr;
6849    int mem_idx = cpu_mmu_index(env, false);
6850
6851    /* This won't be crossing page boundaries */
6852    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
6853    if (haddr) {
6854
6855        ram_addr_t offset;
6856        MemoryRegion *mr;
6857
6858        /* RCU lock is already being held */
6859        mr = memory_region_from_host(haddr, &offset);
6860
6861        if (mr) {
6862            memory_region_writeback(mr, offset, dline_size);
6863        }
6864    }
6865}
6866
6867static const ARMCPRegInfo dcpop_reg[] = {
6868    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
6869      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
6870      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
6871      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
6872    REGINFO_SENTINEL
6873};
6874
6875static const ARMCPRegInfo dcpodp_reg[] = {
6876    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
6877      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
6878      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
6879      .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn },
6880    REGINFO_SENTINEL
6881};
6882#endif /*CONFIG_USER_ONLY*/
6883
6884static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri,
6885                                       bool isread)
6886{
6887    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) {
6888        return CP_ACCESS_TRAP_EL2;
6889    }
6890
6891    return CP_ACCESS_OK;
6892}
6893
6894static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri,
6895                                 bool isread)
6896{
6897    int el = arm_current_el(env);
6898
6899    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
6900        uint64_t hcr = arm_hcr_el2_eff(env);
6901        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
6902            return CP_ACCESS_TRAP_EL2;
6903        }
6904    }
6905    if (el < 3 &&
6906        arm_feature(env, ARM_FEATURE_EL3) &&
6907        !(env->cp15.scr_el3 & SCR_ATA)) {
6908        return CP_ACCESS_TRAP_EL3;
6909    }
6910    return CP_ACCESS_OK;
6911}
6912
6913static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri)
6914{
6915    return env->pstate & PSTATE_TCO;
6916}
6917
6918static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
6919{
6920    env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO);
6921}
6922
6923static const ARMCPRegInfo mte_reginfo[] = {
6924    { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64,
6925      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1,
6926      .access = PL1_RW, .accessfn = access_mte,
6927      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) },
6928    { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64,
6929      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
6930      .access = PL1_RW, .accessfn = access_mte,
6931      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
6932    { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
6933      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0,
6934      .access = PL2_RW, .accessfn = access_mte,
6935      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) },
6936    { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64,
6937      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0,
6938      .access = PL3_RW,
6939      .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) },
6940    { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64,
6941      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5,
6942      .access = PL1_RW, .accessfn = access_mte,
6943      .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) },
6944    { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64,
6945      .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
6946      .access = PL1_RW, .accessfn = access_mte,
6947      .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
6948    { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
6949      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
6950      .access = PL1_R, .accessfn = access_aa64_tid5,
6951      .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
6952    { .name = "TCO", .state = ARM_CP_STATE_AA64,
6953      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
6954      .type = ARM_CP_NO_RAW,
6955      .access = PL0_RW, .readfn = tco_read, .writefn = tco_write },
6956    { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64,
6957      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3,
6958      .type = ARM_CP_NOP, .access = PL1_W,
6959      .accessfn = aa64_cacheop_poc_access },
6960    { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64,
6961      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4,
6962      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
6963    { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64,
6964      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5,
6965      .type = ARM_CP_NOP, .access = PL1_W,
6966      .accessfn = aa64_cacheop_poc_access },
6967    { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64,
6968      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6,
6969      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
6970    { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64,
6971      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4,
6972      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
6973    { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64,
6974      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6,
6975      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
6976    { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64,
6977      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4,
6978      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
6979    { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64,
6980      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6,
6981      .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw },
6982    REGINFO_SENTINEL
6983};
6984
6985static const ARMCPRegInfo mte_tco_ro_reginfo[] = {
6986    { .name = "TCO", .state = ARM_CP_STATE_AA64,
6987      .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
6988      .type = ARM_CP_CONST, .access = PL0_RW, },
6989    REGINFO_SENTINEL
6990};
6991
6992static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = {
6993    { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64,
6994      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3,
6995      .type = ARM_CP_NOP, .access = PL0_W,
6996      .accessfn = aa64_cacheop_poc_access },
6997    { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64,
6998      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5,
6999      .type = ARM_CP_NOP, .access = PL0_W,
7000      .accessfn = aa64_cacheop_poc_access },
7001    { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64,
7002      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3,
7003      .type = ARM_CP_NOP, .access = PL0_W,
7004      .accessfn = aa64_cacheop_poc_access },
7005    { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64,
7006      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5,
7007      .type = ARM_CP_NOP, .access = PL0_W,
7008      .accessfn = aa64_cacheop_poc_access },
7009    { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64,
7010      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3,
7011      .type = ARM_CP_NOP, .access = PL0_W,
7012      .accessfn = aa64_cacheop_poc_access },
7013    { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64,
7014      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5,
7015      .type = ARM_CP_NOP, .access = PL0_W,
7016      .accessfn = aa64_cacheop_poc_access },
7017    { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64,
7018      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3,
7019      .type = ARM_CP_NOP, .access = PL0_W,
7020      .accessfn = aa64_cacheop_poc_access },
7021    { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64,
7022      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5,
7023      .type = ARM_CP_NOP, .access = PL0_W,
7024      .accessfn = aa64_cacheop_poc_access },
7025    { .name = "DC_GVA", .state = ARM_CP_STATE_AA64,
7026      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3,
7027      .access = PL0_W, .type = ARM_CP_DC_GVA,
7028#ifndef CONFIG_USER_ONLY
7029      /* Avoid overhead of an access check that always passes in user-mode */
7030      .accessfn = aa64_zva_access,
7031#endif
7032    },
7033    { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64,
7034      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4,
7035      .access = PL0_W, .type = ARM_CP_DC_GZVA,
7036#ifndef CONFIG_USER_ONLY
7037      /* Avoid overhead of an access check that always passes in user-mode */
7038      .accessfn = aa64_zva_access,
7039#endif
7040    },
7041    REGINFO_SENTINEL
7042};
7043
7044#endif
7045
7046static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
7047                                     bool isread)
7048{
7049    int el = arm_current_el(env);
7050
7051    if (el == 0) {
7052        uint64_t sctlr = arm_sctlr(env, el);
7053        if (!(sctlr & SCTLR_EnRCTX)) {
7054            return CP_ACCESS_TRAP;
7055        }
7056    } else if (el == 1) {
7057        uint64_t hcr = arm_hcr_el2_eff(env);
7058        if (hcr & HCR_NV) {
7059            return CP_ACCESS_TRAP_EL2;
7060        }
7061    }
7062    return CP_ACCESS_OK;
7063}
7064
7065static const ARMCPRegInfo predinv_reginfo[] = {
7066    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
7067      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
7068      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7069    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
7070      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
7071      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7072    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
7073      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
7074      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7075    /*
7076     * Note the AArch32 opcodes have a different OPC1.
7077     */
7078    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
7079      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
7080      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7081    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
7082      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
7083      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7084    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
7085      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
7086      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
7087    REGINFO_SENTINEL
7088};
7089
7090static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri)
7091{
7092    /* Read the high 32 bits of the current CCSIDR */
7093    return extract64(ccsidr_read(env, ri), 32, 32);
7094}
7095
7096static const ARMCPRegInfo ccsidr2_reginfo[] = {
7097    { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH,
7098      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2,
7099      .access = PL1_R,
7100      .accessfn = access_aa64_tid2,
7101      .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW },
7102    REGINFO_SENTINEL
7103};
7104
7105static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
7106                                       bool isread)
7107{
7108    if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) {
7109        return CP_ACCESS_TRAP_EL2;
7110    }
7111
7112    return CP_ACCESS_OK;
7113}
7114
7115static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
7116                                       bool isread)
7117{
7118    if (arm_feature(env, ARM_FEATURE_V8)) {
7119        return access_aa64_tid3(env, ri, isread);
7120    }
7121
7122    return CP_ACCESS_OK;
7123}
7124
7125static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
7126                                     bool isread)
7127{
7128    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
7129        return CP_ACCESS_TRAP_EL2;
7130    }
7131
7132    return CP_ACCESS_OK;
7133}
7134
7135static const ARMCPRegInfo jazelle_regs[] = {
7136    { .name = "JIDR",
7137      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
7138      .access = PL1_R, .accessfn = access_jazelle,
7139      .type = ARM_CP_CONST, .resetvalue = 0 },
7140    { .name = "JOSCR",
7141      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
7142      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
7143    { .name = "JMCR",
7144      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
7145      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
7146    REGINFO_SENTINEL
7147};
7148
7149static const ARMCPRegInfo vhe_reginfo[] = {
7150    { .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64,
7151      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1,
7152      .access = PL2_RW,
7153      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) },
7154    { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64,
7155      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1,
7156      .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write,
7157      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) },
7158#ifndef CONFIG_USER_ONLY
7159    { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64,
7160      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2,
7161      .fieldoffset =
7162        offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval),
7163      .type = ARM_CP_IO, .access = PL2_RW,
7164      .writefn = gt_hv_cval_write, .raw_writefn = raw_write },
7165    { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
7166      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0,
7167      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
7168      .resetfn = gt_hv_timer_reset,
7169      .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write },
7170    { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH,
7171      .type = ARM_CP_IO,
7172      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1,
7173      .access = PL2_RW,
7174      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl),
7175      .writefn = gt_hv_ctl_write, .raw_writefn = raw_write },
7176    { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64,
7177      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1,
7178      .type = ARM_CP_IO | ARM_CP_ALIAS,
7179      .access = PL2_RW, .accessfn = e2h_access,
7180      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
7181      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write },
7182    { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64,
7183      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1,
7184      .type = ARM_CP_IO | ARM_CP_ALIAS,
7185      .access = PL2_RW, .accessfn = e2h_access,
7186      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
7187      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write },
7188    { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7189      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0,
7190      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7191      .access = PL2_RW, .accessfn = e2h_access,
7192      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write },
7193    { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64,
7194      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0,
7195      .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS,
7196      .access = PL2_RW, .accessfn = e2h_access,
7197      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write },
7198    { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7199      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2,
7200      .type = ARM_CP_IO | ARM_CP_ALIAS,
7201      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
7202      .access = PL2_RW, .accessfn = e2h_access,
7203      .writefn = gt_phys_cval_write, .raw_writefn = raw_write },
7204    { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64,
7205      .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2,
7206      .type = ARM_CP_IO | ARM_CP_ALIAS,
7207      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
7208      .access = PL2_RW, .accessfn = e2h_access,
7209      .writefn = gt_virt_cval_write, .raw_writefn = raw_write },
7210#endif
7211    REGINFO_SENTINEL
7212};
7213
7214#ifndef CONFIG_USER_ONLY
7215static const ARMCPRegInfo ats1e1_reginfo[] = {
7216    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
7217      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7218      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7219      .writefn = ats_write64 },
7220    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
7221      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7222      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7223      .writefn = ats_write64 },
7224    REGINFO_SENTINEL
7225};
7226
7227static const ARMCPRegInfo ats1cp_reginfo[] = {
7228    { .name = "ATS1CPRP",
7229      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
7230      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7231      .writefn = ats_write },
7232    { .name = "ATS1CPWP",
7233      .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
7234      .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
7235      .writefn = ats_write },
7236    REGINFO_SENTINEL
7237};
7238#endif
7239
7240/*
7241 * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
7242 * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
7243 * is non-zero, which is never for ARMv7, optionally in ARMv8
7244 * and mandatorily for ARMv8.2 and up.
7245 * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's
7246 * implementation is RAZ/WI we can ignore this detail, as we
7247 * do for ACTLR.
7248 */
7249static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
7250    { .name = "ACTLR2", .state = ARM_CP_STATE_AA32,
7251      .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3,
7252      .access = PL1_RW, .accessfn = access_tacr,
7253      .type = ARM_CP_CONST, .resetvalue = 0 },
7254    { .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
7255      .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
7256      .access = PL2_RW, .type = ARM_CP_CONST,
7257      .resetvalue = 0 },
7258    REGINFO_SENTINEL
7259};
7260
7261void register_cp_regs_for_features(ARMCPU *cpu)
7262{
7263    /* Register all the coprocessor registers based on feature bits */
7264    CPUARMState *env = &cpu->env;
7265    if (arm_feature(env, ARM_FEATURE_M)) {
7266        /* M profile has no coprocessor registers */
7267        return;
7268    }
7269
7270    define_arm_cp_regs(cpu, cp_reginfo);
7271    if (!arm_feature(env, ARM_FEATURE_V8)) {
7272        /* Must go early as it is full of wildcards that may be
7273         * overridden by later definitions.
7274         */
7275        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
7276    }
7277
7278    if (arm_feature(env, ARM_FEATURE_V6)) {
7279        /* The ID registers all have impdef reset values */
7280        ARMCPRegInfo v6_idregs[] = {
7281            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
7282              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
7283              .access = PL1_R, .type = ARM_CP_CONST,
7284              .accessfn = access_aa32_tid3,
7285              .resetvalue = cpu->isar.id_pfr0 },
7286            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
7287             * the value of the GIC field until after we define these regs.
7288             */
7289            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
7290              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
7291              .access = PL1_R, .type = ARM_CP_NO_RAW,
7292              .accessfn = access_aa32_tid3,
7293              .readfn = id_pfr1_read,
7294              .writefn = arm_cp_write_ignore },
7295            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
7296              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
7297              .access = PL1_R, .type = ARM_CP_CONST,
7298              .accessfn = access_aa32_tid3,
7299              .resetvalue = cpu->isar.id_dfr0 },
7300            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
7301              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
7302              .access = PL1_R, .type = ARM_CP_CONST,
7303              .accessfn = access_aa32_tid3,
7304              .resetvalue = cpu->id_afr0 },
7305            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
7306              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
7307              .access = PL1_R, .type = ARM_CP_CONST,
7308              .accessfn = access_aa32_tid3,
7309              .resetvalue = cpu->isar.id_mmfr0 },
7310            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
7311              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
7312              .access = PL1_R, .type = ARM_CP_CONST,
7313              .accessfn = access_aa32_tid3,
7314              .resetvalue = cpu->isar.id_mmfr1 },
7315            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
7316              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
7317              .access = PL1_R, .type = ARM_CP_CONST,
7318              .accessfn = access_aa32_tid3,
7319              .resetvalue = cpu->isar.id_mmfr2 },
7320            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
7321              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
7322              .access = PL1_R, .type = ARM_CP_CONST,
7323              .accessfn = access_aa32_tid3,
7324              .resetvalue = cpu->isar.id_mmfr3 },
7325            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
7326              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
7327              .access = PL1_R, .type = ARM_CP_CONST,
7328              .accessfn = access_aa32_tid3,
7329              .resetvalue = cpu->isar.id_isar0 },
7330            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
7331              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
7332              .access = PL1_R, .type = ARM_CP_CONST,
7333              .accessfn = access_aa32_tid3,
7334              .resetvalue = cpu->isar.id_isar1 },
7335            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
7336              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
7337              .access = PL1_R, .type = ARM_CP_CONST,
7338              .accessfn = access_aa32_tid3,
7339              .resetvalue = cpu->isar.id_isar2 },
7340            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
7341              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
7342              .access = PL1_R, .type = ARM_CP_CONST,
7343              .accessfn = access_aa32_tid3,
7344              .resetvalue = cpu->isar.id_isar3 },
7345            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
7346              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
7347              .access = PL1_R, .type = ARM_CP_CONST,
7348              .accessfn = access_aa32_tid3,
7349              .resetvalue = cpu->isar.id_isar4 },
7350            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
7351              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
7352              .access = PL1_R, .type = ARM_CP_CONST,
7353              .accessfn = access_aa32_tid3,
7354              .resetvalue = cpu->isar.id_isar5 },
7355            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
7356              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
7357              .access = PL1_R, .type = ARM_CP_CONST,
7358              .accessfn = access_aa32_tid3,
7359              .resetvalue = cpu->isar.id_mmfr4 },
7360            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
7361              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
7362              .access = PL1_R, .type = ARM_CP_CONST,
7363              .accessfn = access_aa32_tid3,
7364              .resetvalue = cpu->isar.id_isar6 },
7365            REGINFO_SENTINEL
7366        };
7367        define_arm_cp_regs(cpu, v6_idregs);
7368        define_arm_cp_regs(cpu, v6_cp_reginfo);
7369    } else {
7370        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
7371    }
7372    if (arm_feature(env, ARM_FEATURE_V6K)) {
7373        define_arm_cp_regs(cpu, v6k_cp_reginfo);
7374    }
7375    if (arm_feature(env, ARM_FEATURE_V7MP) &&
7376        !arm_feature(env, ARM_FEATURE_PMSA)) {
7377        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
7378    }
7379    if (arm_feature(env, ARM_FEATURE_V7VE)) {
7380        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
7381    }
7382    if (arm_feature(env, ARM_FEATURE_V7)) {
7383        ARMCPRegInfo clidr = {
7384            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
7385            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
7386            .access = PL1_R, .type = ARM_CP_CONST,
7387            .accessfn = access_aa64_tid2,
7388            .resetvalue = cpu->clidr
7389        };
7390        define_one_arm_cp_reg(cpu, &clidr);
7391        define_arm_cp_regs(cpu, v7_cp_reginfo);
7392        define_debug_regs(cpu);
7393        define_pmu_regs(cpu);
7394    } else {
7395        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
7396    }
7397    if (arm_feature(env, ARM_FEATURE_V8)) {
7398        /* AArch64 ID registers, which all have impdef reset values.
7399         * Note that within the ID register ranges the unused slots
7400         * must all RAZ, not UNDEF; future architecture versions may
7401         * define new registers here.
7402         */
7403        ARMCPRegInfo v8_idregs[] = {
7404            /*
7405             * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
7406             * emulation because we don't know the right value for the
7407             * GIC field until after we define these regs.
7408             */
7409            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
7410              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
7411              .access = PL1_R,
7412#ifdef CONFIG_USER_ONLY
7413              .type = ARM_CP_CONST,
7414              .resetvalue = cpu->isar.id_aa64pfr0
7415#else
7416              .type = ARM_CP_NO_RAW,
7417              .accessfn = access_aa64_tid3,
7418              .readfn = id_aa64pfr0_read,
7419              .writefn = arm_cp_write_ignore
7420#endif
7421            },
7422            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
7423              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
7424              .access = PL1_R, .type = ARM_CP_CONST,
7425              .accessfn = access_aa64_tid3,
7426              .resetvalue = cpu->isar.id_aa64pfr1},
7427            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7428              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
7429              .access = PL1_R, .type = ARM_CP_CONST,
7430              .accessfn = access_aa64_tid3,
7431              .resetvalue = 0 },
7432            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7433              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
7434              .access = PL1_R, .type = ARM_CP_CONST,
7435              .accessfn = access_aa64_tid3,
7436              .resetvalue = 0 },
7437            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
7438              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
7439              .access = PL1_R, .type = ARM_CP_CONST,
7440              .accessfn = access_aa64_tid3,
7441              /* At present, only SVEver == 0 is defined anyway.  */
7442              .resetvalue = 0 },
7443            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7444              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
7445              .access = PL1_R, .type = ARM_CP_CONST,
7446              .accessfn = access_aa64_tid3,
7447              .resetvalue = 0 },
7448            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7449              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
7450              .access = PL1_R, .type = ARM_CP_CONST,
7451              .accessfn = access_aa64_tid3,
7452              .resetvalue = 0 },
7453            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7454              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
7455              .access = PL1_R, .type = ARM_CP_CONST,
7456              .accessfn = access_aa64_tid3,
7457              .resetvalue = 0 },
7458            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
7459              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
7460              .access = PL1_R, .type = ARM_CP_CONST,
7461              .accessfn = access_aa64_tid3,
7462              .resetvalue = cpu->isar.id_aa64dfr0 },
7463            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
7464              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
7465              .access = PL1_R, .type = ARM_CP_CONST,
7466              .accessfn = access_aa64_tid3,
7467              .resetvalue = cpu->isar.id_aa64dfr1 },
7468            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7469              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
7470              .access = PL1_R, .type = ARM_CP_CONST,
7471              .accessfn = access_aa64_tid3,
7472              .resetvalue = 0 },
7473            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7474              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
7475              .access = PL1_R, .type = ARM_CP_CONST,
7476              .accessfn = access_aa64_tid3,
7477              .resetvalue = 0 },
7478            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
7479              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
7480              .access = PL1_R, .type = ARM_CP_CONST,
7481              .accessfn = access_aa64_tid3,
7482              .resetvalue = cpu->id_aa64afr0 },
7483            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
7484              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
7485              .access = PL1_R, .type = ARM_CP_CONST,
7486              .accessfn = access_aa64_tid3,
7487              .resetvalue = cpu->id_aa64afr1 },
7488            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7489              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
7490              .access = PL1_R, .type = ARM_CP_CONST,
7491              .accessfn = access_aa64_tid3,
7492              .resetvalue = 0 },
7493            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7494              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
7495              .access = PL1_R, .type = ARM_CP_CONST,
7496              .accessfn = access_aa64_tid3,
7497              .resetvalue = 0 },
7498            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
7499              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
7500              .access = PL1_R, .type = ARM_CP_CONST,
7501              .accessfn = access_aa64_tid3,
7502              .resetvalue = cpu->isar.id_aa64isar0 },
7503            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
7504              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
7505              .access = PL1_R, .type = ARM_CP_CONST,
7506              .accessfn = access_aa64_tid3,
7507              .resetvalue = cpu->isar.id_aa64isar1 },
7508            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7509              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
7510              .access = PL1_R, .type = ARM_CP_CONST,
7511              .accessfn = access_aa64_tid3,
7512              .resetvalue = 0 },
7513            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7514              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
7515              .access = PL1_R, .type = ARM_CP_CONST,
7516              .accessfn = access_aa64_tid3,
7517              .resetvalue = 0 },
7518            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7519              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
7520              .access = PL1_R, .type = ARM_CP_CONST,
7521              .accessfn = access_aa64_tid3,
7522              .resetvalue = 0 },
7523            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7524              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
7525              .access = PL1_R, .type = ARM_CP_CONST,
7526              .accessfn = access_aa64_tid3,
7527              .resetvalue = 0 },
7528            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7529              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
7530              .access = PL1_R, .type = ARM_CP_CONST,
7531              .accessfn = access_aa64_tid3,
7532              .resetvalue = 0 },
7533            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7534              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
7535              .access = PL1_R, .type = ARM_CP_CONST,
7536              .accessfn = access_aa64_tid3,
7537              .resetvalue = 0 },
7538            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
7539              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
7540              .access = PL1_R, .type = ARM_CP_CONST,
7541              .accessfn = access_aa64_tid3,
7542              .resetvalue = cpu->isar.id_aa64mmfr0 },
7543            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
7544              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
7545              .access = PL1_R, .type = ARM_CP_CONST,
7546              .accessfn = access_aa64_tid3,
7547              .resetvalue = cpu->isar.id_aa64mmfr1 },
7548            { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
7549              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
7550              .access = PL1_R, .type = ARM_CP_CONST,
7551              .accessfn = access_aa64_tid3,
7552              .resetvalue = cpu->isar.id_aa64mmfr2 },
7553            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7554              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
7555              .access = PL1_R, .type = ARM_CP_CONST,
7556              .accessfn = access_aa64_tid3,
7557              .resetvalue = 0 },
7558            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7559              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
7560              .access = PL1_R, .type = ARM_CP_CONST,
7561              .accessfn = access_aa64_tid3,
7562              .resetvalue = 0 },
7563            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7564              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
7565              .access = PL1_R, .type = ARM_CP_CONST,
7566              .accessfn = access_aa64_tid3,
7567              .resetvalue = 0 },
7568            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7569              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
7570              .access = PL1_R, .type = ARM_CP_CONST,
7571              .accessfn = access_aa64_tid3,
7572              .resetvalue = 0 },
7573            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7574              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
7575              .access = PL1_R, .type = ARM_CP_CONST,
7576              .accessfn = access_aa64_tid3,
7577              .resetvalue = 0 },
7578            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
7579              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
7580              .access = PL1_R, .type = ARM_CP_CONST,
7581              .accessfn = access_aa64_tid3,
7582              .resetvalue = cpu->isar.mvfr0 },
7583            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
7584              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
7585              .access = PL1_R, .type = ARM_CP_CONST,
7586              .accessfn = access_aa64_tid3,
7587              .resetvalue = cpu->isar.mvfr1 },
7588            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
7589              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
7590              .access = PL1_R, .type = ARM_CP_CONST,
7591              .accessfn = access_aa64_tid3,
7592              .resetvalue = cpu->isar.mvfr2 },
7593            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7594              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
7595              .access = PL1_R, .type = ARM_CP_CONST,
7596              .accessfn = access_aa64_tid3,
7597              .resetvalue = 0 },
7598            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7599              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
7600              .access = PL1_R, .type = ARM_CP_CONST,
7601              .accessfn = access_aa64_tid3,
7602              .resetvalue = 0 },
7603            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7604              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
7605              .access = PL1_R, .type = ARM_CP_CONST,
7606              .accessfn = access_aa64_tid3,
7607              .resetvalue = 0 },
7608            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7609              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
7610              .access = PL1_R, .type = ARM_CP_CONST,
7611              .accessfn = access_aa64_tid3,
7612              .resetvalue = 0 },
7613            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
7614              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
7615              .access = PL1_R, .type = ARM_CP_CONST,
7616              .accessfn = access_aa64_tid3,
7617              .resetvalue = 0 },
7618            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
7619              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
7620              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7621              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
7622            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
7623              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
7624              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7625              .resetvalue = cpu->pmceid0 },
7626            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
7627              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
7628              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7629              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
7630            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
7631              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
7632              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
7633              .resetvalue = cpu->pmceid1 },
7634            REGINFO_SENTINEL
7635        };
7636#ifdef CONFIG_USER_ONLY
7637        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
7638            { .name = "ID_AA64PFR0_EL1",
7639              .exported_bits = 0x000f000f00ff0000,
7640              .fixed_bits    = 0x0000000000000011 },
7641            { .name = "ID_AA64PFR1_EL1",
7642              .exported_bits = 0x00000000000000f0 },
7643            { .name = "ID_AA64PFR*_EL1_RESERVED",
7644              .is_glob = true                     },
7645            { .name = "ID_AA64ZFR0_EL1"           },
7646            { .name = "ID_AA64MMFR0_EL1",
7647              .fixed_bits    = 0x00000000ff000000 },
7648            { .name = "ID_AA64MMFR1_EL1"          },
7649            { .name = "ID_AA64MMFR*_EL1_RESERVED",
7650              .is_glob = true                     },
7651            { .name = "ID_AA64DFR0_EL1",
7652              .fixed_bits    = 0x0000000000000006 },
7653            { .name = "ID_AA64DFR1_EL1"           },
7654            { .name = "ID_AA64DFR*_EL1_RESERVED",
7655              .is_glob = true                     },
7656            { .name = "ID_AA64AFR*",
7657              .is_glob = true                     },
7658            { .name = "ID_AA64ISAR0_EL1",
7659              .exported_bits = 0x00fffffff0fffff0 },
7660            { .name = "ID_AA64ISAR1_EL1",
7661              .exported_bits = 0x000000f0ffffffff },
7662            { .name = "ID_AA64ISAR*_EL1_RESERVED",
7663              .is_glob = true                     },
7664            REGUSERINFO_SENTINEL
7665        };
7666        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
7667#endif
7668        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
7669        if (!arm_feature(env, ARM_FEATURE_EL3) &&
7670            !arm_feature(env, ARM_FEATURE_EL2)) {
7671            ARMCPRegInfo rvbar = {
7672                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
7673                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
7674                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
7675            };
7676            define_one_arm_cp_reg(cpu, &rvbar);
7677        }
7678        define_arm_cp_regs(cpu, v8_idregs);
7679        define_arm_cp_regs(cpu, v8_cp_reginfo);
7680    }
7681    if (arm_feature(env, ARM_FEATURE_EL2)) {
7682        uint64_t vmpidr_def = mpidr_read_val(env);
7683        ARMCPRegInfo vpidr_regs[] = {
7684            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
7685              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7686              .access = PL2_RW, .accessfn = access_el3_aa32ns,
7687              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
7688              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
7689            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
7690              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7691              .access = PL2_RW, .resetvalue = cpu->midr,
7692              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
7693            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
7694              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7695              .access = PL2_RW, .accessfn = access_el3_aa32ns,
7696              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
7697              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
7698            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
7699              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7700              .access = PL2_RW,
7701              .resetvalue = vmpidr_def,
7702              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
7703            REGINFO_SENTINEL
7704        };
7705        define_arm_cp_regs(cpu, vpidr_regs);
7706        define_arm_cp_regs(cpu, el2_cp_reginfo);
7707        if (arm_feature(env, ARM_FEATURE_V8)) {
7708            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
7709        }
7710        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
7711        if (!arm_feature(env, ARM_FEATURE_EL3)) {
7712            ARMCPRegInfo rvbar = {
7713                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
7714                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
7715                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
7716            };
7717            define_one_arm_cp_reg(cpu, &rvbar);
7718        }
7719    } else {
7720        /* If EL2 is missing but higher ELs are enabled, we need to
7721         * register the no_el2 reginfos.
7722         */
7723        if (arm_feature(env, ARM_FEATURE_EL3)) {
7724            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
7725             * of MIDR_EL1 and MPIDR_EL1.
7726             */
7727            ARMCPRegInfo vpidr_regs[] = {
7728                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
7729                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
7730                  .access = PL2_RW, .accessfn = access_el3_aa32ns,
7731                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
7732                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
7733                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
7734                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
7735                  .access = PL2_RW, .accessfn = access_el3_aa32ns,
7736                  .type = ARM_CP_NO_RAW,
7737                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
7738                REGINFO_SENTINEL
7739            };
7740            define_arm_cp_regs(cpu, vpidr_regs);
7741            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
7742            if (arm_feature(env, ARM_FEATURE_V8)) {
7743                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
7744            }
7745        }
7746    }
7747    if (arm_feature(env, ARM_FEATURE_EL3)) {
7748        define_arm_cp_regs(cpu, el3_cp_reginfo);
7749        ARMCPRegInfo el3_regs[] = {
7750            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
7751              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
7752              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
7753            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
7754              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
7755              .access = PL3_RW,
7756              .raw_writefn = raw_write, .writefn = sctlr_write,
7757              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
7758              .resetvalue = cpu->reset_sctlr },
7759            REGINFO_SENTINEL
7760        };
7761
7762        define_arm_cp_regs(cpu, el3_regs);
7763    }
7764    /* The behaviour of NSACR is sufficiently various that we don't
7765     * try to describe it in a single reginfo:
7766     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
7767     *     reads as constant 0xc00 from NS EL1 and NS EL2
7768     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
7769     *  if v7 without EL3, register doesn't exist
7770     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
7771     */
7772    if (arm_feature(env, ARM_FEATURE_EL3)) {
7773        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
7774            ARMCPRegInfo nsacr = {
7775                .name = "NSACR", .type = ARM_CP_CONST,
7776                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
7777                .access = PL1_RW, .accessfn = nsacr_access,
7778                .resetvalue = 0xc00
7779            };
7780            define_one_arm_cp_reg(cpu, &nsacr);
7781        } else {
7782            ARMCPRegInfo nsacr = {
7783                .name = "NSACR",
7784                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
7785                .access = PL3_RW | PL1_R,
7786                .resetvalue = 0,
7787                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
7788            };
7789            define_one_arm_cp_reg(cpu, &nsacr);
7790        }
7791    } else {
7792        if (arm_feature(env, ARM_FEATURE_V8)) {
7793            ARMCPRegInfo nsacr = {
7794                .name = "NSACR", .type = ARM_CP_CONST,
7795                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
7796                .access = PL1_R,
7797                .resetvalue = 0xc00
7798            };
7799            define_one_arm_cp_reg(cpu, &nsacr);
7800        }
7801    }
7802
7803    if (arm_feature(env, ARM_FEATURE_PMSA)) {
7804        if (arm_feature(env, ARM_FEATURE_V6)) {
7805            /* PMSAv6 not implemented */
7806            assert(arm_feature(env, ARM_FEATURE_V7));
7807            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
7808            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
7809        } else {
7810            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
7811        }
7812    } else {
7813        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
7814        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
7815        /* TTCBR2 is introduced with ARMv8.2-AA32HPD.  */
7816        if (cpu_isar_feature(aa32_hpd, cpu)) {
7817            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
7818        }
7819    }
7820    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
7821        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
7822    }
7823    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
7824        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
7825    }
7826    if (arm_feature(env, ARM_FEATURE_VAPA)) {
7827        define_arm_cp_regs(cpu, vapa_cp_reginfo);
7828    }
7829    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
7830        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
7831    }
7832    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
7833        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
7834    }
7835    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
7836        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
7837    }
7838    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
7839        define_arm_cp_regs(cpu, omap_cp_reginfo);
7840    }
7841    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
7842        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
7843    }
7844    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
7845        define_arm_cp_regs(cpu, xscale_cp_reginfo);
7846    }
7847    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
7848        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
7849    }
7850    if (arm_feature(env, ARM_FEATURE_LPAE)) {
7851        define_arm_cp_regs(cpu, lpae_cp_reginfo);
7852    }
7853    if (cpu_isar_feature(aa32_jazelle, cpu)) {
7854        define_arm_cp_regs(cpu, jazelle_regs);
7855    }
7856    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
7857     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
7858     * be read-only (ie write causes UNDEF exception).
7859     */
7860    {
7861        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
7862            /* Pre-v8 MIDR space.
7863             * Note that the MIDR isn't a simple constant register because
7864             * of the TI925 behaviour where writes to another register can
7865             * cause the MIDR value to change.
7866             *
7867             * Unimplemented registers in the c15 0 0 0 space default to
7868             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
7869             * and friends override accordingly.
7870             */
7871            { .name = "MIDR",
7872              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
7873              .access = PL1_R, .resetvalue = cpu->midr,
7874              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
7875              .readfn = midr_read,
7876              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
7877              .type = ARM_CP_OVERRIDE },
7878            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
7879            { .name = "DUMMY",
7880              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
7881              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7882            { .name = "DUMMY",
7883              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
7884              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7885            { .name = "DUMMY",
7886              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
7887              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7888            { .name = "DUMMY",
7889              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
7890              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7891            { .name = "DUMMY",
7892              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
7893              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
7894            REGINFO_SENTINEL
7895        };
7896        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
7897            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
7898              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
7899              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
7900              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
7901              .readfn = midr_read },
7902            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
7903            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
7904              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
7905              .access = PL1_R, .resetvalue = cpu->midr },
7906            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
7907              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
7908              .access = PL1_R, .resetvalue = cpu->midr },
7909            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
7910              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
7911              .access = PL1_R,
7912              .accessfn = access_aa64_tid1,
7913              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
7914            REGINFO_SENTINEL
7915        };
7916        ARMCPRegInfo id_cp_reginfo[] = {
7917            /* These are common to v8 and pre-v8 */
7918            { .name = "CTR",
7919              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
7920              .access = PL1_R, .accessfn = ctr_el0_access,
7921              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
7922            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
7923              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
7924              .access = PL0_R, .accessfn = ctr_el0_access,
7925              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
7926            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
7927            { .name = "TCMTR",
7928              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
7929              .access = PL1_R,
7930              .accessfn = access_aa32_tid1,
7931              .type = ARM_CP_CONST, .resetvalue = 0 },
7932            REGINFO_SENTINEL
7933        };
7934        /* TLBTR is specific to VMSA */
7935        ARMCPRegInfo id_tlbtr_reginfo = {
7936              .name = "TLBTR",
7937              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
7938              .access = PL1_R,
7939              .accessfn = access_aa32_tid1,
7940              .type = ARM_CP_CONST, .resetvalue = 0,
7941        };
7942        /* MPUIR is specific to PMSA V6+ */
7943        ARMCPRegInfo id_mpuir_reginfo = {
7944              .name = "MPUIR",
7945              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
7946              .access = PL1_R, .type = ARM_CP_CONST,
7947              .resetvalue = cpu->pmsav7_dregion << 8
7948        };
7949        ARMCPRegInfo crn0_wi_reginfo = {
7950            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
7951            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
7952            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
7953        };
7954#ifdef CONFIG_USER_ONLY
7955        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
7956            { .name = "MIDR_EL1",
7957              .exported_bits = 0x00000000ffffffff },
7958            { .name = "REVIDR_EL1"                },
7959            REGUSERINFO_SENTINEL
7960        };
7961        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
7962#endif
7963        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
7964            arm_feature(env, ARM_FEATURE_STRONGARM)) {
7965            ARMCPRegInfo *r;
7966            /* Register the blanket "writes ignored" value first to cover the
7967             * whole space. Then update the specific ID registers to allow write
7968             * access, so that they ignore writes rather than causing them to
7969             * UNDEF.
7970             */
7971            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
7972            for (r = id_pre_v8_midr_cp_reginfo;
7973                 r->type != ARM_CP_SENTINEL; r++) {
7974                r->access = PL1_RW;
7975            }
7976            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
7977                r->access = PL1_RW;
7978            }
7979            id_mpuir_reginfo.access = PL1_RW;
7980            id_tlbtr_reginfo.access = PL1_RW;
7981        }
7982        if (arm_feature(env, ARM_FEATURE_V8)) {
7983            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
7984        } else {
7985            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
7986        }
7987        define_arm_cp_regs(cpu, id_cp_reginfo);
7988        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
7989            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
7990        } else if (arm_feature(env, ARM_FEATURE_V7)) {
7991            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
7992        }
7993    }
7994
7995    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
7996        ARMCPRegInfo mpidr_cp_reginfo[] = {
7997            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
7998              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
7999              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
8000            REGINFO_SENTINEL
8001        };
8002#ifdef CONFIG_USER_ONLY
8003        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
8004            { .name = "MPIDR_EL1",
8005              .fixed_bits = 0x0000000080000000 },
8006            REGUSERINFO_SENTINEL
8007        };
8008        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
8009#endif
8010        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
8011    }
8012
8013    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
8014        ARMCPRegInfo auxcr_reginfo[] = {
8015            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
8016              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
8017              .access = PL1_RW, .accessfn = access_tacr,
8018              .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr },
8019            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
8020              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
8021              .access = PL2_RW, .type = ARM_CP_CONST,
8022              .resetvalue = 0 },
8023            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
8024              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
8025              .access = PL3_RW, .type = ARM_CP_CONST,
8026              .resetvalue = 0 },
8027            REGINFO_SENTINEL
8028        };
8029        define_arm_cp_regs(cpu, auxcr_reginfo);
8030        if (cpu_isar_feature(aa32_ac2, cpu)) {
8031            define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo);
8032        }
8033    }
8034
8035    if (arm_feature(env, ARM_FEATURE_CBAR)) {
8036        /*
8037         * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
8038         * There are two flavours:
8039         *  (1) older 32-bit only cores have a simple 32-bit CBAR
8040         *  (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
8041         *      32-bit register visible to AArch32 at a different encoding
8042         *      to the "flavour 1" register and with the bits rearranged to
8043         *      be able to squash a 64-bit address into the 32-bit view.
8044         * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
8045         * in future if we support AArch32-only configs of some of the
8046         * AArch64 cores we might need to add a specific feature flag
8047         * to indicate cores with "flavour 2" CBAR.
8048         */
8049        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
8050            /* 32 bit view is [31:18] 0...0 [43:32]. */
8051            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
8052                | extract64(cpu->reset_cbar, 32, 12);
8053            ARMCPRegInfo cbar_reginfo[] = {
8054                { .name = "CBAR",
8055                  .type = ARM_CP_CONST,
8056                  .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
8057                  .access = PL1_R, .resetvalue = cbar32 },
8058                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
8059                  .type = ARM_CP_CONST,
8060                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
8061                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
8062                REGINFO_SENTINEL
8063            };
8064            /* We don't implement a r/w 64 bit CBAR currently */
8065            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
8066            define_arm_cp_regs(cpu, cbar_reginfo);
8067        } else {
8068            ARMCPRegInfo cbar = {
8069                .name = "CBAR",
8070                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
8071                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
8072                .fieldoffset = offsetof(CPUARMState,
8073                                        cp15.c15_config_base_address)
8074            };
8075            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
8076                cbar.access = PL1_R;
8077                cbar.fieldoffset = 0;
8078                cbar.type = ARM_CP_CONST;
8079            }
8080            define_one_arm_cp_reg(cpu, &cbar);
8081        }
8082    }
8083
8084    if (arm_feature(env, ARM_FEATURE_VBAR)) {
8085        ARMCPRegInfo vbar_cp_reginfo[] = {
8086            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
8087              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
8088              .access = PL1_RW, .writefn = vbar_write,
8089              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
8090                                     offsetof(CPUARMState, cp15.vbar_ns) },
8091              .resetvalue = 0 },
8092            REGINFO_SENTINEL
8093        };
8094        define_arm_cp_regs(cpu, vbar_cp_reginfo);
8095    }
8096
8097    /* Generic registers whose values depend on the implementation */
8098    {
8099        ARMCPRegInfo sctlr = {
8100            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
8101            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
8102            .access = PL1_RW, .accessfn = access_tvm_trvm,
8103            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
8104                                   offsetof(CPUARMState, cp15.sctlr_ns) },
8105            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
8106            .raw_writefn = raw_write,
8107        };
8108        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
8109            /* Normally we would always end the TB on an SCTLR write, but Linux
8110             * arch/arm/mach-pxa/sleep.S expects two instructions following
8111             * an MMU enable to execute from cache.  Imitate this behaviour.
8112             */
8113            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
8114        }
8115        define_one_arm_cp_reg(cpu, &sctlr);
8116    }
8117
8118    if (cpu_isar_feature(aa64_lor, cpu)) {
8119        define_arm_cp_regs(cpu, lor_reginfo);
8120    }
8121    if (cpu_isar_feature(aa64_pan, cpu)) {
8122        define_one_arm_cp_reg(cpu, &pan_reginfo);
8123    }
8124#ifndef CONFIG_USER_ONLY
8125    if (cpu_isar_feature(aa64_ats1e1, cpu)) {
8126        define_arm_cp_regs(cpu, ats1e1_reginfo);
8127    }
8128    if (cpu_isar_feature(aa32_ats1e1, cpu)) {
8129        define_arm_cp_regs(cpu, ats1cp_reginfo);
8130    }
8131#endif
8132    if (cpu_isar_feature(aa64_uao, cpu)) {
8133        define_one_arm_cp_reg(cpu, &uao_reginfo);
8134    }
8135
8136    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
8137        define_arm_cp_regs(cpu, vhe_reginfo);
8138    }
8139
8140    if (cpu_isar_feature(aa64_sve, cpu)) {
8141        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
8142        if (arm_feature(env, ARM_FEATURE_EL2)) {
8143            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
8144        } else {
8145            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
8146        }
8147        if (arm_feature(env, ARM_FEATURE_EL3)) {
8148            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
8149        }
8150    }
8151
8152#ifdef TARGET_AARCH64
8153    if (cpu_isar_feature(aa64_pauth, cpu)) {
8154        define_arm_cp_regs(cpu, pauth_reginfo);
8155    }
8156    if (cpu_isar_feature(aa64_rndr, cpu)) {
8157        define_arm_cp_regs(cpu, rndr_reginfo);
8158    }
8159#ifndef CONFIG_USER_ONLY
8160    /* Data Cache clean instructions up to PoP */
8161    if (cpu_isar_feature(aa64_dcpop, cpu)) {
8162        define_one_arm_cp_reg(cpu, dcpop_reg);
8163
8164        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
8165            define_one_arm_cp_reg(cpu, dcpodp_reg);
8166        }
8167    }
8168#endif /*CONFIG_USER_ONLY*/
8169
8170    /*
8171     * If full MTE is enabled, add all of the system registers.
8172     * If only "instructions available at EL0" are enabled,
8173     * then define only a RAZ/WI version of PSTATE.TCO.
8174     */
8175    if (cpu_isar_feature(aa64_mte, cpu)) {
8176        define_arm_cp_regs(cpu, mte_reginfo);
8177        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
8178    } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
8179        define_arm_cp_regs(cpu, mte_tco_ro_reginfo);
8180        define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
8181    }
8182#endif
8183
8184    if (cpu_isar_feature(any_predinv, cpu)) {
8185        define_arm_cp_regs(cpu, predinv_reginfo);
8186    }
8187
8188    if (cpu_isar_feature(any_ccidx, cpu)) {
8189        define_arm_cp_regs(cpu, ccsidr2_reginfo);
8190    }
8191
8192#ifndef CONFIG_USER_ONLY
8193    /*
8194     * Register redirections and aliases must be done last,
8195     * after the registers from the other extensions have been defined.
8196     */
8197    if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
8198        define_arm_vh_e2h_redirects_aliases(cpu);
8199    }
8200#endif
8201}
8202
8203void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
8204{
8205    CPUState *cs = CPU(cpu);
8206    CPUARMState *env = &cpu->env;
8207
8208    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
8209        /*
8210         * The lower part of each SVE register aliases to the FPU
8211         * registers so we don't need to include both.
8212         */
8213#ifdef TARGET_AARCH64
8214        if (isar_feature_aa64_sve(&cpu->isar)) {
8215            gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg,
8216                                     arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs),
8217                                     "sve-registers.xml", 0);
8218        } else
8219#endif
8220        {
8221            gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
8222                                     aarch64_fpu_gdb_set_reg,
8223                                     34, "aarch64-fpu.xml", 0);
8224        }
8225    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
8226        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
8227                                 51, "arm-neon.xml", 0);
8228    } else if (cpu_isar_feature(aa32_simd_r32, cpu)) {
8229        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
8230                                 35, "arm-vfp3.xml", 0);
8231    } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
8232        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
8233                                 19, "arm-vfp.xml", 0);
8234    }
8235    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
8236                             arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs),
8237                             "system-registers.xml", 0);
8238
8239}
8240
8241/* Sort alphabetically by type name, except for "any". */
8242static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
8243{
8244    ObjectClass *class_a = (ObjectClass *)a;
8245    ObjectClass *class_b = (ObjectClass *)b;
8246    const char *name_a, *name_b;
8247
8248    name_a = object_class_get_name(class_a);
8249    name_b = object_class_get_name(class_b);
8250    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
8251        return 1;
8252    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
8253        return -1;
8254    } else {
8255        return strcmp(name_a, name_b);
8256    }
8257}
8258
8259static void arm_cpu_list_entry(gpointer data, gpointer user_data)
8260{
8261    ObjectClass *oc = data;
8262    const char *typename;
8263    char *name;
8264
8265    typename = object_class_get_name(oc);
8266    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
8267    qemu_printf("  %s\n", name);
8268    g_free(name);
8269}
8270
8271void arm_cpu_list(void)
8272{
8273    GSList *list;
8274
8275    list = object_class_get_list(TYPE_ARM_CPU, false);
8276    list = g_slist_sort(list, arm_cpu_list_compare);
8277    qemu_printf("Available CPUs:\n");
8278    g_slist_foreach(list, arm_cpu_list_entry, NULL);
8279    g_slist_free(list);
8280}
8281
8282static void arm_cpu_add_definition(gpointer data, gpointer user_data)
8283{
8284    ObjectClass *oc = data;
8285    CpuDefinitionInfoList **cpu_list = user_data;
8286    CpuDefinitionInfoList *entry;
8287    CpuDefinitionInfo *info;
8288    const char *typename;
8289
8290    typename = object_class_get_name(oc);
8291    info = g_malloc0(sizeof(*info));
8292    info->name = g_strndup(typename,
8293                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
8294    info->q_typename = g_strdup(typename);
8295
8296    entry = g_malloc0(sizeof(*entry));
8297    entry->value = info;
8298    entry->next = *cpu_list;
8299    *cpu_list = entry;
8300}
8301
8302CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
8303{
8304    CpuDefinitionInfoList *cpu_list = NULL;
8305    GSList *list;
8306
8307    list = object_class_get_list(TYPE_ARM_CPU, false);
8308    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
8309    g_slist_free(list);
8310
8311    return cpu_list;
8312}
8313
8314static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
8315                                   void *opaque, int state, int secstate,
8316                                   int crm, int opc1, int opc2,
8317                                   const char *name)
8318{
8319    /* Private utility function for define_one_arm_cp_reg_with_opaque():
8320     * add a single reginfo struct to the hash table.
8321     */
8322    uint32_t *key = g_new(uint32_t, 1);
8323    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
8324    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
8325    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
8326
8327    r2->name = g_strdup(name);
8328    /* Reset the secure state to the specific incoming state.  This is
8329     * necessary as the register may have been defined with both states.
8330     */
8331    r2->secure = secstate;
8332
8333    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
8334        /* Register is banked (using both entries in array).
8335         * Overwriting fieldoffset as the array is only used to define
8336         * banked registers but later only fieldoffset is used.
8337         */
8338        r2->fieldoffset = r->bank_fieldoffsets[ns];
8339    }
8340
8341    if (state == ARM_CP_STATE_AA32) {
8342        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
8343            /* If the register is banked then we don't need to migrate or
8344             * reset the 32-bit instance in certain cases:
8345             *
8346             * 1) If the register has both 32-bit and 64-bit instances then we
8347             *    can count on the 64-bit instance taking care of the
8348             *    non-secure bank.
8349             * 2) If ARMv8 is enabled then we can count on a 64-bit version
8350             *    taking care of the secure bank.  This requires that separate
8351             *    32 and 64-bit definitions are provided.
8352             */
8353            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
8354                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
8355                r2->type |= ARM_CP_ALIAS;
8356            }
8357        } else if ((secstate != r->secure) && !ns) {
8358            /* The register is not banked so we only want to allow migration of
8359             * the non-secure instance.
8360             */
8361            r2->type |= ARM_CP_ALIAS;
8362        }
8363
8364        if (r->state == ARM_CP_STATE_BOTH) {
8365            /* We assume it is a cp15 register if the .cp field is left unset.
8366             */
8367            if (r2->cp == 0) {
8368                r2->cp = 15;
8369            }
8370
8371#ifdef HOST_WORDS_BIGENDIAN
8372            if (r2->fieldoffset) {
8373                r2->fieldoffset += sizeof(uint32_t);
8374            }
8375#endif
8376        }
8377    }
8378    if (state == ARM_CP_STATE_AA64) {
8379        /* To allow abbreviation of ARMCPRegInfo
8380         * definitions, we treat cp == 0 as equivalent to
8381         * the value for "standard guest-visible sysreg".
8382         * STATE_BOTH definitions are also always "standard
8383         * sysreg" in their AArch64 view (the .cp value may
8384         * be non-zero for the benefit of the AArch32 view).
8385         */
8386        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
8387            r2->cp = CP_REG_ARM64_SYSREG_CP;
8388        }
8389        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
8390                                  r2->opc0, opc1, opc2);
8391    } else {
8392        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
8393    }
8394    if (opaque) {
8395        r2->opaque = opaque;
8396    }
8397    /* reginfo passed to helpers is correct for the actual access,
8398     * and is never ARM_CP_STATE_BOTH:
8399     */
8400    r2->state = state;
8401    /* Make sure reginfo passed to helpers for wildcarded regs
8402     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
8403     */
8404    r2->crm = crm;
8405    r2->opc1 = opc1;
8406    r2->opc2 = opc2;
8407    /* By convention, for wildcarded registers only the first
8408     * entry is used for migration; the others are marked as
8409     * ALIAS so we don't try to transfer the register
8410     * multiple times. Special registers (ie NOP/WFI) are
8411     * never migratable and not even raw-accessible.
8412     */
8413    if ((r->type & ARM_CP_SPECIAL)) {
8414        r2->type |= ARM_CP_NO_RAW;
8415    }
8416    if (((r->crm == CP_ANY) && crm != 0) ||
8417        ((r->opc1 == CP_ANY) && opc1 != 0) ||
8418        ((r->opc2 == CP_ANY) && opc2 != 0)) {
8419        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
8420    }
8421
8422    /* Check that raw accesses are either forbidden or handled. Note that
8423     * we can't assert this earlier because the setup of fieldoffset for
8424     * banked registers has to be done first.
8425     */
8426    if (!(r2->type & ARM_CP_NO_RAW)) {
8427        assert(!raw_accessors_invalid(r2));
8428    }
8429
8430    /* Overriding of an existing definition must be explicitly
8431     * requested.
8432     */
8433    if (!(r->type & ARM_CP_OVERRIDE)) {
8434        ARMCPRegInfo *oldreg;
8435        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
8436        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
8437            fprintf(stderr, "Register redefined: cp=%d %d bit "
8438                    "crn=%d crm=%d opc1=%d opc2=%d, "
8439                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
8440                    r2->crn, r2->crm, r2->opc1, r2->opc2,
8441                    oldreg->name, r2->name);
8442            g_assert_not_reached();
8443        }
8444    }
8445    g_hash_table_insert(cpu->cp_regs, key, r2);
8446}
8447
8448
8449void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
8450                                       const ARMCPRegInfo *r, void *opaque)
8451{
8452    /* Define implementations of coprocessor registers.
8453     * We store these in a hashtable because typically
8454     * there are less than 150 registers in a space which
8455     * is 16*16*16*8*8 = 262144 in size.
8456     * Wildcarding is supported for the crm, opc1 and opc2 fields.
8457     * If a register is defined twice then the second definition is
8458     * used, so this can be used to define some generic registers and
8459     * then override them with implementation specific variations.
8460     * At least one of the original and the second definition should
8461     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
8462     * against accidental use.
8463     *
8464     * The state field defines whether the register is to be
8465     * visible in the AArch32 or AArch64 execution state. If the
8466     * state is set to ARM_CP_STATE_BOTH then we synthesise a
8467     * reginfo structure for the AArch32 view, which sees the lower
8468     * 32 bits of the 64 bit register.
8469     *
8470     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
8471     * be wildcarded. AArch64 registers are always considered to be 64
8472     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
8473     * the register, if any.
8474     */
8475    int crm, opc1, opc2, state;
8476    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
8477    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
8478    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
8479    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
8480    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
8481    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
8482    /* 64 bit registers have only CRm and Opc1 fields */
8483    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
8484    /* op0 only exists in the AArch64 encodings */
8485    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
8486    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
8487    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
8488    /*
8489     * This API is only for Arm's system coprocessors (14 and 15) or
8490     * (M-profile or v7A-and-earlier only) for implementation defined
8491     * coprocessors in the range 0..7.  Our decode assumes this, since
8492     * 8..13 can be used for other insns including VFP and Neon. See
8493     * valid_cp() in translate.c.  Assert here that we haven't tried
8494     * to use an invalid coprocessor number.
8495     */
8496    switch (r->state) {
8497    case ARM_CP_STATE_BOTH:
8498        /* 0 has a special meaning, but otherwise the same rules as AA32. */
8499        if (r->cp == 0) {
8500            break;
8501        }
8502        /* fall through */
8503    case ARM_CP_STATE_AA32:
8504        if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
8505            !arm_feature(&cpu->env, ARM_FEATURE_M)) {
8506            assert(r->cp >= 14 && r->cp <= 15);
8507        } else {
8508            assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
8509        }
8510        break;
8511    case ARM_CP_STATE_AA64:
8512        assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
8513        break;
8514    default:
8515        g_assert_not_reached();
8516    }
8517    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
8518     * encodes a minimum access level for the register. We roll this
8519     * runtime check into our general permission check code, so check
8520     * here that the reginfo's specified permissions are strict enough
8521     * to encompass the generic architectural permission check.
8522     */
8523    if (r->state != ARM_CP_STATE_AA32) {
8524        int mask = 0;
8525        switch (r->opc1) {
8526        case 0:
8527            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
8528            mask = PL0U_R | PL1_RW;
8529            break;
8530        case 1: case 2:
8531            /* min_EL EL1 */
8532            mask = PL1_RW;
8533            break;
8534        case 3:
8535            /* min_EL EL0 */
8536            mask = PL0_RW;
8537            break;
8538        case 4:
8539        case 5:
8540            /* min_EL EL2 */
8541            mask = PL2_RW;
8542            break;
8543        case 6:
8544            /* min_EL EL3 */
8545            mask = PL3_RW;
8546            break;
8547        case 7:
8548            /* min_EL EL1, secure mode only (we don't check the latter) */
8549            mask = PL1_RW;
8550            break;
8551        default:
8552            /* broken reginfo with out-of-range opc1 */
8553            assert(false);
8554            break;
8555        }
8556        /* assert our permissions are not too lax (stricter is fine) */
8557        assert((r->access & ~mask) == 0);
8558    }
8559
8560    /* Check that the register definition has enough info to handle
8561     * reads and writes if they are permitted.
8562     */
8563    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
8564        if (r->access & PL3_R) {
8565            assert((r->fieldoffset ||
8566                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8567                   r->readfn);
8568        }
8569        if (r->access & PL3_W) {
8570            assert((r->fieldoffset ||
8571                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
8572                   r->writefn);
8573        }
8574    }
8575    /* Bad type field probably means missing sentinel at end of reg list */
8576    assert(cptype_valid(r->type));
8577    for (crm = crmmin; crm <= crmmax; crm++) {
8578        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
8579            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
8580                for (state = ARM_CP_STATE_AA32;
8581                     state <= ARM_CP_STATE_AA64; state++) {
8582                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
8583                        continue;
8584                    }
8585                    if (state == ARM_CP_STATE_AA32) {
8586                        /* Under AArch32 CP registers can be common
8587                         * (same for secure and non-secure world) or banked.
8588                         */
8589                        char *name;
8590
8591                        switch (r->secure) {
8592                        case ARM_CP_SECSTATE_S:
8593                        case ARM_CP_SECSTATE_NS:
8594                            add_cpreg_to_hashtable(cpu, r, opaque, state,
8595                                                   r->secure, crm, opc1, opc2,
8596                                                   r->name);
8597                            break;
8598                        default:
8599                            name = g_strdup_printf("%s_S", r->name);
8600                            add_cpreg_to_hashtable(cpu, r, opaque, state,
8601                                                   ARM_CP_SECSTATE_S,
8602                                                   crm, opc1, opc2, name);
8603                            g_free(name);
8604                            add_cpreg_to_hashtable(cpu, r, opaque, state,
8605                                                   ARM_CP_SECSTATE_NS,
8606                                                   crm, opc1, opc2, r->name);
8607                            break;
8608                        }
8609                    } else {
8610                        /* AArch64 registers get mapped to non-secure instance
8611                         * of AArch32 */
8612                        add_cpreg_to_hashtable(cpu, r, opaque, state,
8613                                               ARM_CP_SECSTATE_NS,
8614                                               crm, opc1, opc2, r->name);
8615                    }
8616                }
8617            }
8618        }
8619    }
8620}
8621
8622void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
8623                                    const ARMCPRegInfo *regs, void *opaque)
8624{
8625    /* Define a whole list of registers */
8626    const ARMCPRegInfo *r;
8627    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
8628        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
8629    }
8630}
8631
8632/*
8633 * Modify ARMCPRegInfo for access from userspace.
8634 *
8635 * This is a data driven modification directed by
8636 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
8637 * user-space cannot alter any values and dynamic values pertaining to
8638 * execution state are hidden from user space view anyway.
8639 */
8640void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
8641{
8642    const ARMCPRegUserSpaceInfo *m;
8643    ARMCPRegInfo *r;
8644
8645    for (m = mods; m->name; m++) {
8646        GPatternSpec *pat = NULL;
8647        if (m->is_glob) {
8648            pat = g_pattern_spec_new(m->name);
8649        }
8650        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
8651            if (pat && g_pattern_match_string(pat, r->name)) {
8652                r->type = ARM_CP_CONST;
8653                r->access = PL0U_R;
8654                r->resetvalue = 0;
8655                /* continue */
8656            } else if (strcmp(r->name, m->name) == 0) {
8657                r->type = ARM_CP_CONST;
8658                r->access = PL0U_R;
8659                r->resetvalue &= m->exported_bits;
8660                r->resetvalue |= m->fixed_bits;
8661                break;
8662            }
8663        }
8664        if (pat) {
8665            g_pattern_spec_free(pat);
8666        }
8667    }
8668}
8669
8670const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
8671{
8672    return g_hash_table_lookup(cpregs, &encoded_cp);
8673}
8674
8675void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
8676                         uint64_t value)
8677{
8678    /* Helper coprocessor write function for write-ignore registers */
8679}
8680
8681uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
8682{
8683    /* Helper coprocessor write function for read-as-zero registers */
8684    return 0;
8685}
8686
8687void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
8688{
8689    /* Helper coprocessor reset function for do-nothing-on-reset registers */
8690}
8691
8692static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
8693{
8694    /* Return true if it is not valid for us to switch to
8695     * this CPU mode (ie all the UNPREDICTABLE cases in
8696     * the ARM ARM CPSRWriteByInstr pseudocode).
8697     */
8698
8699    /* Changes to or from Hyp via MSR and CPS are illegal. */
8700    if (write_type == CPSRWriteByInstr &&
8701        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
8702         mode == ARM_CPU_MODE_HYP)) {
8703        return 1;
8704    }
8705
8706    switch (mode) {
8707    case ARM_CPU_MODE_USR:
8708        return 0;
8709    case ARM_CPU_MODE_SYS:
8710    case ARM_CPU_MODE_SVC:
8711    case ARM_CPU_MODE_ABT:
8712    case ARM_CPU_MODE_UND:
8713    case ARM_CPU_MODE_IRQ:
8714    case ARM_CPU_MODE_FIQ:
8715        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
8716         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
8717         */
8718        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
8719         * and CPS are treated as illegal mode changes.
8720         */
8721        if (write_type == CPSRWriteByInstr &&
8722            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
8723            (arm_hcr_el2_eff(env) & HCR_TGE)) {
8724            return 1;
8725        }
8726        return 0;
8727    case ARM_CPU_MODE_HYP:
8728        return !arm_feature(env, ARM_FEATURE_EL2)
8729            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
8730    case ARM_CPU_MODE_MON:
8731        return arm_current_el(env) < 3;
8732    default:
8733        return 1;
8734    }
8735}
8736
8737uint32_t cpsr_read(CPUARMState *env)
8738{
8739    int ZF;
8740    ZF = (env->ZF == 0);
8741    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
8742        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
8743        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
8744        | ((env->condexec_bits & 0xfc) << 8)
8745        | (env->GE << 16) | (env->daif & CPSR_AIF);
8746}
8747
8748void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
8749                CPSRWriteType write_type)
8750{
8751    uint32_t changed_daif;
8752
8753    if (mask & CPSR_NZCV) {
8754        env->ZF = (~val) & CPSR_Z;
8755        env->NF = val;
8756        env->CF = (val >> 29) & 1;
8757        env->VF = (val << 3) & 0x80000000;
8758    }
8759    if (mask & CPSR_Q)
8760        env->QF = ((val & CPSR_Q) != 0);
8761    if (mask & CPSR_T)
8762        env->thumb = ((val & CPSR_T) != 0);
8763    if (mask & CPSR_IT_0_1) {
8764        env->condexec_bits &= ~3;
8765        env->condexec_bits |= (val >> 25) & 3;
8766    }
8767    if (mask & CPSR_IT_2_7) {
8768        env->condexec_bits &= 3;
8769        env->condexec_bits |= (val >> 8) & 0xfc;
8770    }
8771    if (mask & CPSR_GE) {
8772        env->GE = (val >> 16) & 0xf;
8773    }
8774
8775    /* In a V7 implementation that includes the security extensions but does
8776     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
8777     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
8778     * bits respectively.
8779     *
8780     * In a V8 implementation, it is permitted for privileged software to
8781     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
8782     */
8783    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
8784        arm_feature(env, ARM_FEATURE_EL3) &&
8785        !arm_feature(env, ARM_FEATURE_EL2) &&
8786        !arm_is_secure(env)) {
8787
8788        changed_daif = (env->daif ^ val) & mask;
8789
8790        if (changed_daif & CPSR_A) {
8791            /* Check to see if we are allowed to change the masking of async
8792             * abort exceptions from a non-secure state.
8793             */
8794            if (!(env->cp15.scr_el3 & SCR_AW)) {
8795                qemu_log_mask(LOG_GUEST_ERROR,
8796                              "Ignoring attempt to switch CPSR_A flag from "
8797                              "non-secure world with SCR.AW bit clear\n");
8798                mask &= ~CPSR_A;
8799            }
8800        }
8801
8802        if (changed_daif & CPSR_F) {
8803            /* Check to see if we are allowed to change the masking of FIQ
8804             * exceptions from a non-secure state.
8805             */
8806            if (!(env->cp15.scr_el3 & SCR_FW)) {
8807                qemu_log_mask(LOG_GUEST_ERROR,
8808                              "Ignoring attempt to switch CPSR_F flag from "
8809                              "non-secure world with SCR.FW bit clear\n");
8810                mask &= ~CPSR_F;
8811            }
8812
8813            /* Check whether non-maskable FIQ (NMFI) support is enabled.
8814             * If this bit is set software is not allowed to mask
8815             * FIQs, but is allowed to set CPSR_F to 0.
8816             */
8817            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
8818                (val & CPSR_F)) {
8819                qemu_log_mask(LOG_GUEST_ERROR,
8820                              "Ignoring attempt to enable CPSR_F flag "
8821                              "(non-maskable FIQ [NMFI] support enabled)\n");
8822                mask &= ~CPSR_F;
8823            }
8824        }
8825    }
8826
8827    env->daif &= ~(CPSR_AIF & mask);
8828    env->daif |= val & CPSR_AIF & mask;
8829
8830    if (write_type != CPSRWriteRaw &&
8831        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
8832        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
8833            /* Note that we can only get here in USR mode if this is a
8834             * gdb stub write; for this case we follow the architectural
8835             * behaviour for guest writes in USR mode of ignoring an attempt
8836             * to switch mode. (Those are caught by translate.c for writes
8837             * triggered by guest instructions.)
8838             */
8839            mask &= ~CPSR_M;
8840        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
8841            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
8842             * v7, and has defined behaviour in v8:
8843             *  + leave CPSR.M untouched
8844             *  + allow changes to the other CPSR fields
8845             *  + set PSTATE.IL
8846             * For user changes via the GDB stub, we don't set PSTATE.IL,
8847             * as this would be unnecessarily harsh for a user error.
8848             */
8849            mask &= ~CPSR_M;
8850            if (write_type != CPSRWriteByGDBStub &&
8851                arm_feature(env, ARM_FEATURE_V8)) {
8852                mask |= CPSR_IL;
8853                val |= CPSR_IL;
8854            }
8855            qemu_log_mask(LOG_GUEST_ERROR,
8856                          "Illegal AArch32 mode switch attempt from %s to %s\n",
8857                          aarch32_mode_name(env->uncached_cpsr),
8858                          aarch32_mode_name(val));
8859        } else {
8860            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
8861                          write_type == CPSRWriteExceptionReturn ?
8862                          "Exception return from AArch32" :
8863                          "AArch32 mode switch from",
8864                          aarch32_mode_name(env->uncached_cpsr),
8865                          aarch32_mode_name(val), env->regs[15]);
8866            switch_mode(env, val & CPSR_M);
8867        }
8868    }
8869    mask &= ~CACHED_CPSR_BITS;
8870    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
8871}
8872
8873/* Sign/zero extend */
8874uint32_t HELPER(sxtb16)(uint32_t x)
8875{
8876    uint32_t res;
8877    res = (uint16_t)(int8_t)x;
8878    res |= (uint32_t)(int8_t)(x >> 16) << 16;
8879    return res;
8880}
8881
8882uint32_t HELPER(uxtb16)(uint32_t x)
8883{
8884    uint32_t res;
8885    res = (uint16_t)(uint8_t)x;
8886    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
8887    return res;
8888}
8889
8890int32_t HELPER(sdiv)(int32_t num, int32_t den)
8891{
8892    if (den == 0)
8893      return 0;
8894    if (num == INT_MIN && den == -1)
8895      return INT_MIN;
8896    return num / den;
8897}
8898
8899uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
8900{
8901    if (den == 0)
8902      return 0;
8903    return num / den;
8904}
8905
8906uint32_t HELPER(rbit)(uint32_t x)
8907{
8908    return revbit32(x);
8909}
8910
8911#ifdef CONFIG_USER_ONLY
8912
8913static void switch_mode(CPUARMState *env, int mode)
8914{
8915    ARMCPU *cpu = env_archcpu(env);
8916
8917    if (mode != ARM_CPU_MODE_USR) {
8918        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
8919    }
8920}
8921
8922uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
8923                                 uint32_t cur_el, bool secure)
8924{
8925    return 1;
8926}
8927
8928void aarch64_sync_64_to_32(CPUARMState *env)
8929{
8930    g_assert_not_reached();
8931}
8932
8933#else
8934
8935static void switch_mode(CPUARMState *env, int mode)
8936{
8937    int old_mode;
8938    int i;
8939
8940    old_mode = env->uncached_cpsr & CPSR_M;
8941    if (mode == old_mode)
8942        return;
8943
8944    if (old_mode == ARM_CPU_MODE_FIQ) {
8945        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
8946        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
8947    } else if (mode == ARM_CPU_MODE_FIQ) {
8948        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
8949        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
8950    }
8951
8952    i = bank_number(old_mode);
8953    env->banked_r13[i] = env->regs[13];
8954    env->banked_spsr[i] = env->spsr;
8955
8956    i = bank_number(mode);
8957    env->regs[13] = env->banked_r13[i];
8958    env->spsr = env->banked_spsr[i];
8959
8960    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
8961    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
8962}
8963
8964/* Physical Interrupt Target EL Lookup Table
8965 *
8966 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
8967 *
8968 * The below multi-dimensional table is used for looking up the target
8969 * exception level given numerous condition criteria.  Specifically, the
8970 * target EL is based on SCR and HCR routing controls as well as the
8971 * currently executing EL and secure state.
8972 *
8973 *    Dimensions:
8974 *    target_el_table[2][2][2][2][2][4]
8975 *                    |  |  |  |  |  +--- Current EL
8976 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
8977 *                    |  |  |  +--------- HCR mask override
8978 *                    |  |  +------------ SCR exec state control
8979 *                    |  +--------------- SCR mask override
8980 *                    +------------------ 32-bit(0)/64-bit(1) EL3
8981 *
8982 *    The table values are as such:
8983 *    0-3 = EL0-EL3
8984 *     -1 = Cannot occur
8985 *
8986 * The ARM ARM target EL table includes entries indicating that an "exception
8987 * is not taken".  The two cases where this is applicable are:
8988 *    1) An exception is taken from EL3 but the SCR does not have the exception
8989 *    routed to EL3.
8990 *    2) An exception is taken from EL2 but the HCR does not have the exception
8991 *    routed to EL2.
8992 * In these two cases, the below table contain a target of EL1.  This value is
8993 * returned as it is expected that the consumer of the table data will check
8994 * for "target EL >= current EL" to ensure the exception is not taken.
8995 *
8996 *            SCR     HCR
8997 *         64  EA     AMO                 From
8998 *        BIT IRQ     IMO      Non-secure         Secure
8999 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
9000 */
9001static const int8_t target_el_table[2][2][2][2][2][4] = {
9002    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
9003       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
9004      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
9005       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
9006     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
9007       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
9008      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
9009       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
9010    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
9011       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
9012      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
9013       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
9014     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
9015       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
9016      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
9017       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
9018};
9019
9020/*
9021 * Determine the target EL for physical exceptions
9022 */
9023uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
9024                                 uint32_t cur_el, bool secure)
9025{
9026    CPUARMState *env = cs->env_ptr;
9027    bool rw;
9028    bool scr;
9029    bool hcr;
9030    int target_el;
9031    /* Is the highest EL AArch64? */
9032    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
9033    uint64_t hcr_el2;
9034
9035    if (arm_feature(env, ARM_FEATURE_EL3)) {
9036        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
9037    } else {
9038        /* Either EL2 is the highest EL (and so the EL2 register width
9039         * is given by is64); or there is no EL2 or EL3, in which case
9040         * the value of 'rw' does not affect the table lookup anyway.
9041         */
9042        rw = is64;
9043    }
9044
9045    hcr_el2 = arm_hcr_el2_eff(env);
9046    switch (excp_idx) {
9047    case EXCP_IRQ:
9048        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
9049        hcr = hcr_el2 & HCR_IMO;
9050        break;
9051    case EXCP_FIQ:
9052        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
9053        hcr = hcr_el2 & HCR_FMO;
9054        break;
9055    default:
9056        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
9057        hcr = hcr_el2 & HCR_AMO;
9058        break;
9059    };
9060
9061    /*
9062     * For these purposes, TGE and AMO/IMO/FMO both force the
9063     * interrupt to EL2.  Fold TGE into the bit extracted above.
9064     */
9065    hcr |= (hcr_el2 & HCR_TGE) != 0;
9066
9067    /* Perform a table-lookup for the target EL given the current state */
9068    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
9069
9070    assert(target_el > 0);
9071
9072    return target_el;
9073}
9074
9075void arm_log_exception(int idx)
9076{
9077    if (qemu_loglevel_mask(CPU_LOG_INT)) {
9078        const char *exc = NULL;
9079        static const char * const excnames[] = {
9080            [EXCP_UDEF] = "Undefined Instruction",
9081            [EXCP_SWI] = "SVC",
9082            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
9083            [EXCP_DATA_ABORT] = "Data Abort",
9084            [EXCP_IRQ] = "IRQ",
9085            [EXCP_FIQ] = "FIQ",
9086            [EXCP_BKPT] = "Breakpoint",
9087            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
9088            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
9089            [EXCP_HVC] = "Hypervisor Call",
9090            [EXCP_HYP_TRAP] = "Hypervisor Trap",
9091            [EXCP_SMC] = "Secure Monitor Call",
9092            [EXCP_VIRQ] = "Virtual IRQ",
9093            [EXCP_VFIQ] = "Virtual FIQ",
9094            [EXCP_SEMIHOST] = "Semihosting call",
9095            [EXCP_NOCP] = "v7M NOCP UsageFault",
9096            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
9097            [EXCP_STKOF] = "v8M STKOF UsageFault",
9098            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
9099            [EXCP_LSERR] = "v8M LSERR UsageFault",
9100            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
9101        };
9102
9103        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
9104            exc = excnames[idx];
9105        }
9106        if (!exc) {
9107            exc = "unknown";
9108        }
9109        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
9110    }
9111}
9112
9113/*
9114 * Function used to synchronize QEMU's AArch64 register set with AArch32
9115 * register set.  This is necessary when switching between AArch32 and AArch64
9116 * execution state.
9117 */
9118void aarch64_sync_32_to_64(CPUARMState *env)
9119{
9120    int i;
9121    uint32_t mode = env->uncached_cpsr & CPSR_M;
9122
9123    /* We can blanket copy R[0:7] to X[0:7] */
9124    for (i = 0; i < 8; i++) {
9125        env->xregs[i] = env->regs[i];
9126    }
9127
9128    /*
9129     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
9130     * Otherwise, they come from the banked user regs.
9131     */
9132    if (mode == ARM_CPU_MODE_FIQ) {
9133        for (i = 8; i < 13; i++) {
9134            env->xregs[i] = env->usr_regs[i - 8];
9135        }
9136    } else {
9137        for (i = 8; i < 13; i++) {
9138            env->xregs[i] = env->regs[i];
9139        }
9140    }
9141
9142    /*
9143     * Registers x13-x23 are the various mode SP and FP registers. Registers
9144     * r13 and r14 are only copied if we are in that mode, otherwise we copy
9145     * from the mode banked register.
9146     */
9147    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9148        env->xregs[13] = env->regs[13];
9149        env->xregs[14] = env->regs[14];
9150    } else {
9151        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
9152        /* HYP is an exception in that it is copied from r14 */
9153        if (mode == ARM_CPU_MODE_HYP) {
9154            env->xregs[14] = env->regs[14];
9155        } else {
9156            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
9157        }
9158    }
9159
9160    if (mode == ARM_CPU_MODE_HYP) {
9161        env->xregs[15] = env->regs[13];
9162    } else {
9163        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
9164    }
9165
9166    if (mode == ARM_CPU_MODE_IRQ) {
9167        env->xregs[16] = env->regs[14];
9168        env->xregs[17] = env->regs[13];
9169    } else {
9170        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
9171        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
9172    }
9173
9174    if (mode == ARM_CPU_MODE_SVC) {
9175        env->xregs[18] = env->regs[14];
9176        env->xregs[19] = env->regs[13];
9177    } else {
9178        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
9179        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
9180    }
9181
9182    if (mode == ARM_CPU_MODE_ABT) {
9183        env->xregs[20] = env->regs[14];
9184        env->xregs[21] = env->regs[13];
9185    } else {
9186        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
9187        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
9188    }
9189
9190    if (mode == ARM_CPU_MODE_UND) {
9191        env->xregs[22] = env->regs[14];
9192        env->xregs[23] = env->regs[13];
9193    } else {
9194        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
9195        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
9196    }
9197
9198    /*
9199     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
9200     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
9201     * FIQ bank for r8-r14.
9202     */
9203    if (mode == ARM_CPU_MODE_FIQ) {
9204        for (i = 24; i < 31; i++) {
9205            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
9206        }
9207    } else {
9208        for (i = 24; i < 29; i++) {
9209            env->xregs[i] = env->fiq_regs[i - 24];
9210        }
9211        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
9212        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
9213    }
9214
9215    env->pc = env->regs[15];
9216}
9217
9218/*
9219 * Function used to synchronize QEMU's AArch32 register set with AArch64
9220 * register set.  This is necessary when switching between AArch32 and AArch64
9221 * execution state.
9222 */
9223void aarch64_sync_64_to_32(CPUARMState *env)
9224{
9225    int i;
9226    uint32_t mode = env->uncached_cpsr & CPSR_M;
9227
9228    /* We can blanket copy X[0:7] to R[0:7] */
9229    for (i = 0; i < 8; i++) {
9230        env->regs[i] = env->xregs[i];
9231    }
9232
9233    /*
9234     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
9235     * Otherwise, we copy x8-x12 into the banked user regs.
9236     */
9237    if (mode == ARM_CPU_MODE_FIQ) {
9238        for (i = 8; i < 13; i++) {
9239            env->usr_regs[i - 8] = env->xregs[i];
9240        }
9241    } else {
9242        for (i = 8; i < 13; i++) {
9243            env->regs[i] = env->xregs[i];
9244        }
9245    }
9246
9247    /*
9248     * Registers r13 & r14 depend on the current mode.
9249     * If we are in a given mode, we copy the corresponding x registers to r13
9250     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
9251     * for the mode.
9252     */
9253    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
9254        env->regs[13] = env->xregs[13];
9255        env->regs[14] = env->xregs[14];
9256    } else {
9257        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
9258
9259        /*
9260         * HYP is an exception in that it does not have its own banked r14 but
9261         * shares the USR r14
9262         */
9263        if (mode == ARM_CPU_MODE_HYP) {
9264            env->regs[14] = env->xregs[14];
9265        } else {
9266            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
9267        }
9268    }
9269
9270    if (mode == ARM_CPU_MODE_HYP) {
9271        env->regs[13] = env->xregs[15];
9272    } else {
9273        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
9274    }
9275
9276    if (mode == ARM_CPU_MODE_IRQ) {
9277        env->regs[14] = env->xregs[16];
9278        env->regs[13] = env->xregs[17];
9279    } else {
9280        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
9281        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
9282    }
9283
9284    if (mode == ARM_CPU_MODE_SVC) {
9285        env->regs[14] = env->xregs[18];
9286        env->regs[13] = env->xregs[19];
9287    } else {
9288        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
9289        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
9290    }
9291
9292    if (mode == ARM_CPU_MODE_ABT) {
9293        env->regs[14] = env->xregs[20];
9294        env->regs[13] = env->xregs[21];
9295    } else {
9296        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
9297        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
9298    }
9299
9300    if (mode == ARM_CPU_MODE_UND) {
9301        env->regs[14] = env->xregs[22];
9302        env->regs[13] = env->xregs[23];
9303    } else {
9304        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
9305        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
9306    }
9307
9308    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
9309     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
9310     * FIQ bank for r8-r14.
9311     */
9312    if (mode == ARM_CPU_MODE_FIQ) {
9313        for (i = 24; i < 31; i++) {
9314            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
9315        }
9316    } else {
9317        for (i = 24; i < 29; i++) {
9318            env->fiq_regs[i - 24] = env->xregs[i];
9319        }
9320        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
9321        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
9322    }
9323
9324    env->regs[15] = env->pc;
9325}
9326
9327static void take_aarch32_exception(CPUARMState *env, int new_mode,
9328                                   uint32_t mask, uint32_t offset,
9329                                   uint32_t newpc)
9330{
9331    int new_el;
9332
9333    /* Change the CPU state so as to actually take the exception. */
9334    switch_mode(env, new_mode);
9335
9336    /*
9337     * For exceptions taken to AArch32 we must clear the SS bit in both
9338     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
9339     */
9340    env->uncached_cpsr &= ~PSTATE_SS;
9341    env->spsr = cpsr_read(env);
9342    /* Clear IT bits.  */
9343    env->condexec_bits = 0;
9344    /* Switch to the new mode, and to the correct instruction set.  */
9345    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
9346
9347    /* This must be after mode switching. */
9348    new_el = arm_current_el(env);
9349
9350    /* Set new mode endianness */
9351    env->uncached_cpsr &= ~CPSR_E;
9352    if (env->cp15.sctlr_el[new_el] & SCTLR_EE) {
9353        env->uncached_cpsr |= CPSR_E;
9354    }
9355    /* J and IL must always be cleared for exception entry */
9356    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
9357    env->daif |= mask;
9358
9359    if (new_mode == ARM_CPU_MODE_HYP) {
9360        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
9361        env->elr_el[2] = env->regs[15];
9362    } else {
9363        /* CPSR.PAN is normally preserved preserved unless...  */
9364        if (cpu_isar_feature(aa32_pan, env_archcpu(env))) {
9365            switch (new_el) {
9366            case 3:
9367                if (!arm_is_secure_below_el3(env)) {
9368                    /* ... the target is EL3, from non-secure state.  */
9369                    env->uncached_cpsr &= ~CPSR_PAN;
9370                    break;
9371                }
9372                /* ... the target is EL3, from secure state ... */
9373                /* fall through */
9374            case 1:
9375                /* ... the target is EL1 and SCTLR.SPAN is 0.  */
9376                if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) {
9377                    env->uncached_cpsr |= CPSR_PAN;
9378                }
9379                break;
9380            }
9381        }
9382        /*
9383         * this is a lie, as there was no c1_sys on V4T/V5, but who cares
9384         * and we should just guard the thumb mode on V4
9385         */
9386        if (arm_feature(env, ARM_FEATURE_V4T)) {
9387            env->thumb =
9388                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
9389        }
9390        env->regs[14] = env->regs[15] + offset;
9391    }
9392    env->regs[15] = newpc;
9393    arm_rebuild_hflags(env);
9394}
9395
9396static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
9397{
9398    /*
9399     * Handle exception entry to Hyp mode; this is sufficiently
9400     * different to entry to other AArch32 modes that we handle it
9401     * separately here.
9402     *
9403     * The vector table entry used is always the 0x14 Hyp mode entry point,
9404     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
9405     * The offset applied to the preferred return address is always zero
9406     * (see DDI0487C.a section G1.12.3).
9407     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
9408     */
9409    uint32_t addr, mask;
9410    ARMCPU *cpu = ARM_CPU(cs);
9411    CPUARMState *env = &cpu->env;
9412
9413    switch (cs->exception_index) {
9414    case EXCP_UDEF:
9415        addr = 0x04;
9416        break;
9417    case EXCP_SWI:
9418        addr = 0x14;
9419        break;
9420    case EXCP_BKPT:
9421        /* Fall through to prefetch abort.  */
9422    case EXCP_PREFETCH_ABORT:
9423        env->cp15.ifar_s = env->exception.vaddress;
9424        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
9425                      (uint32_t)env->exception.vaddress);
9426        addr = 0x0c;
9427        break;
9428    case EXCP_DATA_ABORT:
9429        env->cp15.dfar_s = env->exception.vaddress;
9430        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
9431                      (uint32_t)env->exception.vaddress);
9432        addr = 0x10;
9433        break;
9434    case EXCP_IRQ:
9435        addr = 0x18;
9436        break;
9437    case EXCP_FIQ:
9438        addr = 0x1c;
9439        break;
9440    case EXCP_HVC:
9441        addr = 0x08;
9442        break;
9443    case EXCP_HYP_TRAP:
9444        addr = 0x14;
9445        break;
9446    default:
9447        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9448    }
9449
9450    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
9451        if (!arm_feature(env, ARM_FEATURE_V8)) {
9452            /*
9453             * QEMU syndrome values are v8-style. v7 has the IL bit
9454             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
9455             * If this is a v7 CPU, squash the IL bit in those cases.
9456             */
9457            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
9458                (cs->exception_index == EXCP_DATA_ABORT &&
9459                 !(env->exception.syndrome & ARM_EL_ISV)) ||
9460                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
9461                env->exception.syndrome &= ~ARM_EL_IL;
9462            }
9463        }
9464        env->cp15.esr_el[2] = env->exception.syndrome;
9465    }
9466
9467    if (arm_current_el(env) != 2 && addr < 0x14) {
9468        addr = 0x14;
9469    }
9470
9471    mask = 0;
9472    if (!(env->cp15.scr_el3 & SCR_EA)) {
9473        mask |= CPSR_A;
9474    }
9475    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
9476        mask |= CPSR_I;
9477    }
9478    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
9479        mask |= CPSR_F;
9480    }
9481
9482    addr += env->cp15.hvbar;
9483
9484    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
9485}
9486
9487static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
9488{
9489    ARMCPU *cpu = ARM_CPU(cs);
9490    CPUARMState *env = &cpu->env;
9491    uint32_t addr;
9492    uint32_t mask;
9493    int new_mode;
9494    uint32_t offset;
9495    uint32_t moe;
9496
9497    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
9498    switch (syn_get_ec(env->exception.syndrome)) {
9499    case EC_BREAKPOINT:
9500    case EC_BREAKPOINT_SAME_EL:
9501        moe = 1;
9502        break;
9503    case EC_WATCHPOINT:
9504    case EC_WATCHPOINT_SAME_EL:
9505        moe = 10;
9506        break;
9507    case EC_AA32_BKPT:
9508        moe = 3;
9509        break;
9510    case EC_VECTORCATCH:
9511        moe = 5;
9512        break;
9513    default:
9514        moe = 0;
9515        break;
9516    }
9517
9518    if (moe) {
9519        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
9520    }
9521
9522    if (env->exception.target_el == 2) {
9523        arm_cpu_do_interrupt_aarch32_hyp(cs);
9524        return;
9525    }
9526
9527    switch (cs->exception_index) {
9528    case EXCP_UDEF:
9529        new_mode = ARM_CPU_MODE_UND;
9530        addr = 0x04;
9531        mask = CPSR_I;
9532        if (env->thumb)
9533            offset = 2;
9534        else
9535            offset = 4;
9536        break;
9537    case EXCP_SWI:
9538        new_mode = ARM_CPU_MODE_SVC;
9539        addr = 0x08;
9540        mask = CPSR_I;
9541        /* The PC already points to the next instruction.  */
9542        offset = 0;
9543        break;
9544    case EXCP_BKPT:
9545        /* Fall through to prefetch abort.  */
9546    case EXCP_PREFETCH_ABORT:
9547        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
9548        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
9549        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
9550                      env->exception.fsr, (uint32_t)env->exception.vaddress);
9551        new_mode = ARM_CPU_MODE_ABT;
9552        addr = 0x0c;
9553        mask = CPSR_A | CPSR_I;
9554        offset = 4;
9555        break;
9556    case EXCP_DATA_ABORT:
9557        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
9558        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
9559        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
9560                      env->exception.fsr,
9561                      (uint32_t)env->exception.vaddress);
9562        new_mode = ARM_CPU_MODE_ABT;
9563        addr = 0x10;
9564        mask = CPSR_A | CPSR_I;
9565        offset = 8;
9566        break;
9567    case EXCP_IRQ:
9568        new_mode = ARM_CPU_MODE_IRQ;
9569        addr = 0x18;
9570        /* Disable IRQ and imprecise data aborts.  */
9571        mask = CPSR_A | CPSR_I;
9572        offset = 4;
9573        if (env->cp15.scr_el3 & SCR_IRQ) {
9574            /* IRQ routed to monitor mode */
9575            new_mode = ARM_CPU_MODE_MON;
9576            mask |= CPSR_F;
9577        }
9578        break;
9579    case EXCP_FIQ:
9580        new_mode = ARM_CPU_MODE_FIQ;
9581        addr = 0x1c;
9582        /* Disable FIQ, IRQ and imprecise data aborts.  */
9583        mask = CPSR_A | CPSR_I | CPSR_F;
9584        if (env->cp15.scr_el3 & SCR_FIQ) {
9585            /* FIQ routed to monitor mode */
9586            new_mode = ARM_CPU_MODE_MON;
9587        }
9588        offset = 4;
9589        break;
9590    case EXCP_VIRQ:
9591        new_mode = ARM_CPU_MODE_IRQ;
9592        addr = 0x18;
9593        /* Disable IRQ and imprecise data aborts.  */
9594        mask = CPSR_A | CPSR_I;
9595        offset = 4;
9596        break;
9597    case EXCP_VFIQ:
9598        new_mode = ARM_CPU_MODE_FIQ;
9599        addr = 0x1c;
9600        /* Disable FIQ, IRQ and imprecise data aborts.  */
9601        mask = CPSR_A | CPSR_I | CPSR_F;
9602        offset = 4;
9603        break;
9604    case EXCP_SMC:
9605        new_mode = ARM_CPU_MODE_MON;
9606        addr = 0x08;
9607        mask = CPSR_A | CPSR_I | CPSR_F;
9608        offset = 0;
9609        break;
9610    default:
9611        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9612        return; /* Never happens.  Keep compiler happy.  */
9613    }
9614
9615    if (new_mode == ARM_CPU_MODE_MON) {
9616        addr += env->cp15.mvbar;
9617    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
9618        /* High vectors. When enabled, base address cannot be remapped. */
9619        addr += 0xffff0000;
9620    } else {
9621        /* ARM v7 architectures provide a vector base address register to remap
9622         * the interrupt vector table.
9623         * This register is only followed in non-monitor mode, and is banked.
9624         * Note: only bits 31:5 are valid.
9625         */
9626        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
9627    }
9628
9629    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
9630        env->cp15.scr_el3 &= ~SCR_NS;
9631    }
9632
9633    take_aarch32_exception(env, new_mode, mask, offset, addr);
9634}
9635
9636static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
9637{
9638    /*
9639     * Return the register number of the AArch64 view of the AArch32
9640     * register @aarch32_reg. The CPUARMState CPSR is assumed to still
9641     * be that of the AArch32 mode the exception came from.
9642     */
9643    int mode = env->uncached_cpsr & CPSR_M;
9644
9645    switch (aarch32_reg) {
9646    case 0 ... 7:
9647        return aarch32_reg;
9648    case 8 ... 12:
9649        return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg;
9650    case 13:
9651        switch (mode) {
9652        case ARM_CPU_MODE_USR:
9653        case ARM_CPU_MODE_SYS:
9654            return 13;
9655        case ARM_CPU_MODE_HYP:
9656            return 15;
9657        case ARM_CPU_MODE_IRQ:
9658            return 17;
9659        case ARM_CPU_MODE_SVC:
9660            return 19;
9661        case ARM_CPU_MODE_ABT:
9662            return 21;
9663        case ARM_CPU_MODE_UND:
9664            return 23;
9665        case ARM_CPU_MODE_FIQ:
9666            return 29;
9667        default:
9668            g_assert_not_reached();
9669        }
9670    case 14:
9671        switch (mode) {
9672        case ARM_CPU_MODE_USR:
9673        case ARM_CPU_MODE_SYS:
9674        case ARM_CPU_MODE_HYP:
9675            return 14;
9676        case ARM_CPU_MODE_IRQ:
9677            return 16;
9678        case ARM_CPU_MODE_SVC:
9679            return 18;
9680        case ARM_CPU_MODE_ABT:
9681            return 20;
9682        case ARM_CPU_MODE_UND:
9683            return 22;
9684        case ARM_CPU_MODE_FIQ:
9685            return 30;
9686        default:
9687            g_assert_not_reached();
9688        }
9689    case 15:
9690        return 31;
9691    default:
9692        g_assert_not_reached();
9693    }
9694}
9695
9696/* Handle exception entry to a target EL which is using AArch64 */
9697static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
9698{
9699    ARMCPU *cpu = ARM_CPU(cs);
9700    CPUARMState *env = &cpu->env;
9701    unsigned int new_el = env->exception.target_el;
9702    target_ulong addr = env->cp15.vbar_el[new_el];
9703    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
9704    unsigned int old_mode;
9705    unsigned int cur_el = arm_current_el(env);
9706    int rt;
9707
9708    /*
9709     * Note that new_el can never be 0.  If cur_el is 0, then
9710     * el0_a64 is is_a64(), else el0_a64 is ignored.
9711     */
9712    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
9713
9714    if (cur_el < new_el) {
9715        /* Entry vector offset depends on whether the implemented EL
9716         * immediately lower than the target level is using AArch32 or AArch64
9717         */
9718        bool is_aa64;
9719        uint64_t hcr;
9720
9721        switch (new_el) {
9722        case 3:
9723            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
9724            break;
9725        case 2:
9726            hcr = arm_hcr_el2_eff(env);
9727            if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
9728                is_aa64 = (hcr & HCR_RW) != 0;
9729                break;
9730            }
9731            /* fall through */
9732        case 1:
9733            is_aa64 = is_a64(env);
9734            break;
9735        default:
9736            g_assert_not_reached();
9737        }
9738
9739        if (is_aa64) {
9740            addr += 0x400;
9741        } else {
9742            addr += 0x600;
9743        }
9744    } else if (pstate_read(env) & PSTATE_SP) {
9745        addr += 0x200;
9746    }
9747
9748    switch (cs->exception_index) {
9749    case EXCP_PREFETCH_ABORT:
9750    case EXCP_DATA_ABORT:
9751        env->cp15.far_el[new_el] = env->exception.vaddress;
9752        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
9753                      env->cp15.far_el[new_el]);
9754        /* fall through */
9755    case EXCP_BKPT:
9756    case EXCP_UDEF:
9757    case EXCP_SWI:
9758    case EXCP_HVC:
9759    case EXCP_HYP_TRAP:
9760    case EXCP_SMC:
9761        switch (syn_get_ec(env->exception.syndrome)) {
9762        case EC_ADVSIMDFPACCESSTRAP:
9763            /*
9764             * QEMU internal FP/SIMD syndromes from AArch32 include the
9765             * TA and coproc fields which are only exposed if the exception
9766             * is taken to AArch32 Hyp mode. Mask them out to get a valid
9767             * AArch64 format syndrome.
9768             */
9769            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
9770            break;
9771        case EC_CP14RTTRAP:
9772        case EC_CP15RTTRAP:
9773        case EC_CP14DTTRAP:
9774            /*
9775             * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently
9776             * the raw register field from the insn; when taking this to
9777             * AArch64 we must convert it to the AArch64 view of the register
9778             * number. Notice that we read a 4-bit AArch32 register number and
9779             * write back a 5-bit AArch64 one.
9780             */
9781            rt = extract32(env->exception.syndrome, 5, 4);
9782            rt = aarch64_regnum(env, rt);
9783            env->exception.syndrome = deposit32(env->exception.syndrome,
9784                                                5, 5, rt);
9785            break;
9786        case EC_CP15RRTTRAP:
9787        case EC_CP14RRTTRAP:
9788            /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */
9789            rt = extract32(env->exception.syndrome, 5, 4);
9790            rt = aarch64_regnum(env, rt);
9791            env->exception.syndrome = deposit32(env->exception.syndrome,
9792                                                5, 5, rt);
9793            rt = extract32(env->exception.syndrome, 10, 4);
9794            rt = aarch64_regnum(env, rt);
9795            env->exception.syndrome = deposit32(env->exception.syndrome,
9796                                                10, 5, rt);
9797            break;
9798        }
9799        env->cp15.esr_el[new_el] = env->exception.syndrome;
9800        break;
9801    case EXCP_IRQ:
9802    case EXCP_VIRQ:
9803        addr += 0x80;
9804        break;
9805    case EXCP_FIQ:
9806    case EXCP_VFIQ:
9807        addr += 0x100;
9808        break;
9809    default:
9810        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
9811    }
9812
9813    if (is_a64(env)) {
9814        old_mode = pstate_read(env);
9815        aarch64_save_sp(env, arm_current_el(env));
9816        env->elr_el[new_el] = env->pc;
9817    } else {
9818        old_mode = cpsr_read(env);
9819        env->elr_el[new_el] = env->regs[15];
9820
9821        aarch64_sync_32_to_64(env);
9822
9823        env->condexec_bits = 0;
9824    }
9825    env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
9826
9827    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
9828                  env->elr_el[new_el]);
9829
9830    if (cpu_isar_feature(aa64_pan, cpu)) {
9831        /* The value of PSTATE.PAN is normally preserved, except when ... */
9832        new_mode |= old_mode & PSTATE_PAN;
9833        switch (new_el) {
9834        case 2:
9835            /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ...  */
9836            if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE))
9837                != (HCR_E2H | HCR_TGE)) {
9838                break;
9839            }
9840            /* fall through */
9841        case 1:
9842            /* ... the target is EL1 ... */
9843            /* ... and SCTLR_ELx.SPAN == 0, then set to 1.  */
9844            if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) {
9845                new_mode |= PSTATE_PAN;
9846            }
9847            break;
9848        }
9849    }
9850    if (cpu_isar_feature(aa64_mte, cpu)) {
9851        new_mode |= PSTATE_TCO;
9852    }
9853
9854    pstate_write(env, PSTATE_DAIF | new_mode);
9855    env->aarch64 = 1;
9856    aarch64_restore_sp(env, new_el);
9857    helper_rebuild_hflags_a64(env, new_el);
9858
9859    env->pc = addr;
9860
9861    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
9862                  new_el, env->pc, pstate_read(env));
9863}
9864
9865/*
9866 * Do semihosting call and set the appropriate return value. All the
9867 * permission and validity checks have been done at translate time.
9868 *
9869 * We only see semihosting exceptions in TCG only as they are not
9870 * trapped to the hypervisor in KVM.
9871 */
9872#ifdef CONFIG_TCG
9873static void handle_semihosting(CPUState *cs)
9874{
9875    ARMCPU *cpu = ARM_CPU(cs);
9876    CPUARMState *env = &cpu->env;
9877
9878    if (is_a64(env)) {
9879        qemu_log_mask(CPU_LOG_INT,
9880                      "...handling as semihosting call 0x%" PRIx64 "\n",
9881                      env->xregs[0]);
9882        env->xregs[0] = do_arm_semihosting(env);
9883        env->pc += 4;
9884    } else {
9885        qemu_log_mask(CPU_LOG_INT,
9886                      "...handling as semihosting call 0x%x\n",
9887                      env->regs[0]);
9888        env->regs[0] = do_arm_semihosting(env);
9889        env->regs[15] += env->thumb ? 2 : 4;
9890    }
9891}
9892#endif
9893
9894/* Handle a CPU exception for A and R profile CPUs.
9895 * Do any appropriate logging, handle PSCI calls, and then hand off
9896 * to the AArch64-entry or AArch32-entry function depending on the
9897 * target exception level's register width.
9898 */
9899void arm_cpu_do_interrupt(CPUState *cs)
9900{
9901    ARMCPU *cpu = ARM_CPU(cs);
9902    CPUARMState *env = &cpu->env;
9903    unsigned int new_el = env->exception.target_el;
9904
9905    assert(!arm_feature(env, ARM_FEATURE_M));
9906
9907    arm_log_exception(cs->exception_index);
9908    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
9909                  new_el);
9910    if (qemu_loglevel_mask(CPU_LOG_INT)
9911        && !excp_is_internal(cs->exception_index)) {
9912        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
9913                      syn_get_ec(env->exception.syndrome),
9914                      env->exception.syndrome);
9915    }
9916
9917    if (arm_is_psci_call(cpu, cs->exception_index)) {
9918        arm_handle_psci_call(cpu);
9919        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
9920        return;
9921    }
9922
9923    /*
9924     * Semihosting semantics depend on the register width of the code
9925     * that caused the exception, not the target exception level, so
9926     * must be handled here.
9927     */
9928#ifdef CONFIG_TCG
9929    if (cs->exception_index == EXCP_SEMIHOST) {
9930        handle_semihosting(cs);
9931        return;
9932    }
9933#endif
9934
9935    /* Hooks may change global state so BQL should be held, also the
9936     * BQL needs to be held for any modification of
9937     * cs->interrupt_request.
9938     */
9939    g_assert(qemu_mutex_iothread_locked());
9940
9941    arm_call_pre_el_change_hook(cpu);
9942
9943    assert(!excp_is_internal(cs->exception_index));
9944    if (arm_el_is_aa64(env, new_el)) {
9945        arm_cpu_do_interrupt_aarch64(cs);
9946    } else {
9947        arm_cpu_do_interrupt_aarch32(cs);
9948    }
9949
9950    arm_call_el_change_hook(cpu);
9951
9952    if (!kvm_enabled()) {
9953        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
9954    }
9955}
9956#endif /* !CONFIG_USER_ONLY */
9957
9958uint64_t arm_sctlr(CPUARMState *env, int el)
9959{
9960    /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
9961    if (el == 0) {
9962        ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
9963        el = (mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1);
9964    }
9965    return env->cp15.sctlr_el[el];
9966}
9967
9968/* Return the SCTLR value which controls this address translation regime */
9969static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
9970{
9971    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
9972}
9973
9974#ifndef CONFIG_USER_ONLY
9975
9976/* Return true if the specified stage of address translation is disabled */
9977static inline bool regime_translation_disabled(CPUARMState *env,
9978                                               ARMMMUIdx mmu_idx)
9979{
9980    if (arm_feature(env, ARM_FEATURE_M)) {
9981        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
9982                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
9983        case R_V7M_MPU_CTRL_ENABLE_MASK:
9984            /* Enabled, but not for HardFault and NMI */
9985            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
9986        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
9987            /* Enabled for all cases */
9988            return false;
9989        case 0:
9990        default:
9991            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
9992             * we warned about that in armv7m_nvic.c when the guest set it.
9993             */
9994            return true;
9995        }
9996    }
9997
9998    if (mmu_idx == ARMMMUIdx_Stage2) {
9999        /* HCR.DC means HCR.VM behaves as 1 */
10000        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
10001    }
10002
10003    if (env->cp15.hcr_el2 & HCR_TGE) {
10004        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
10005        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
10006            return true;
10007        }
10008    }
10009
10010    if ((env->cp15.hcr_el2 & HCR_DC) && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
10011        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
10012        return true;
10013    }
10014
10015    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
10016}
10017
10018static inline bool regime_translation_big_endian(CPUARMState *env,
10019                                                 ARMMMUIdx mmu_idx)
10020{
10021    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
10022}
10023
10024/* Return the TTBR associated with this translation regime */
10025static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
10026                                   int ttbrn)
10027{
10028    if (mmu_idx == ARMMMUIdx_Stage2) {
10029        return env->cp15.vttbr_el2;
10030    }
10031    if (ttbrn == 0) {
10032        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
10033    } else {
10034        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
10035    }
10036}
10037
10038#endif /* !CONFIG_USER_ONLY */
10039
10040/* Convert a possible stage1+2 MMU index into the appropriate
10041 * stage 1 MMU index
10042 */
10043static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
10044{
10045    switch (mmu_idx) {
10046    case ARMMMUIdx_E10_0:
10047        return ARMMMUIdx_Stage1_E0;
10048    case ARMMMUIdx_E10_1:
10049        return ARMMMUIdx_Stage1_E1;
10050    case ARMMMUIdx_E10_1_PAN:
10051        return ARMMMUIdx_Stage1_E1_PAN;
10052    default:
10053        return mmu_idx;
10054    }
10055}
10056
10057/* Return true if the translation regime is using LPAE format page tables */
10058static inline bool regime_using_lpae_format(CPUARMState *env,
10059                                            ARMMMUIdx mmu_idx)
10060{
10061    int el = regime_el(env, mmu_idx);
10062    if (el == 2 || arm_el_is_aa64(env, el)) {
10063        return true;
10064    }
10065    if (arm_feature(env, ARM_FEATURE_LPAE)
10066        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
10067        return true;
10068    }
10069    return false;
10070}
10071
10072/* Returns true if the stage 1 translation regime is using LPAE format page
10073 * tables. Used when raising alignment exceptions, whose FSR changes depending
10074 * on whether the long or short descriptor format is in use. */
10075bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
10076{
10077    mmu_idx = stage_1_mmu_idx(mmu_idx);
10078
10079    return regime_using_lpae_format(env, mmu_idx);
10080}
10081
10082#ifndef CONFIG_USER_ONLY
10083static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
10084{
10085    switch (mmu_idx) {
10086    case ARMMMUIdx_SE10_0:
10087    case ARMMMUIdx_E20_0:
10088    case ARMMMUIdx_Stage1_E0:
10089    case ARMMMUIdx_MUser:
10090    case ARMMMUIdx_MSUser:
10091    case ARMMMUIdx_MUserNegPri:
10092    case ARMMMUIdx_MSUserNegPri:
10093        return true;
10094    default:
10095        return false;
10096    case ARMMMUIdx_E10_0:
10097    case ARMMMUIdx_E10_1:
10098    case ARMMMUIdx_E10_1_PAN:
10099        g_assert_not_reached();
10100    }
10101}
10102
10103/* Translate section/page access permissions to page
10104 * R/W protection flags
10105 *
10106 * @env:         CPUARMState
10107 * @mmu_idx:     MMU index indicating required translation regime
10108 * @ap:          The 3-bit access permissions (AP[2:0])
10109 * @domain_prot: The 2-bit domain access permissions
10110 */
10111static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
10112                                int ap, int domain_prot)
10113{
10114    bool is_user = regime_is_user(env, mmu_idx);
10115
10116    if (domain_prot == 3) {
10117        return PAGE_READ | PAGE_WRITE;
10118    }
10119
10120    switch (ap) {
10121    case 0:
10122        if (arm_feature(env, ARM_FEATURE_V7)) {
10123            return 0;
10124        }
10125        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
10126        case SCTLR_S:
10127            return is_user ? 0 : PAGE_READ;
10128        case SCTLR_R:
10129            return PAGE_READ;
10130        default:
10131            return 0;
10132        }
10133    case 1:
10134        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
10135    case 2:
10136        if (is_user) {
10137            return PAGE_READ;
10138        } else {
10139            return PAGE_READ | PAGE_WRITE;
10140        }
10141    case 3:
10142        return PAGE_READ | PAGE_WRITE;
10143    case 4: /* Reserved.  */
10144        return 0;
10145    case 5:
10146        return is_user ? 0 : PAGE_READ;
10147    case 6:
10148        return PAGE_READ;
10149    case 7:
10150        if (!arm_feature(env, ARM_FEATURE_V6K)) {
10151            return 0;
10152        }
10153        return PAGE_READ;
10154    default:
10155        g_assert_not_reached();
10156    }
10157}
10158
10159/* Translate section/page access permissions to page
10160 * R/W protection flags.
10161 *
10162 * @ap:      The 2-bit simple AP (AP[2:1])
10163 * @is_user: TRUE if accessing from PL0
10164 */
10165static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
10166{
10167    switch (ap) {
10168    case 0:
10169        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
10170    case 1:
10171        return PAGE_READ | PAGE_WRITE;
10172    case 2:
10173        return is_user ? 0 : PAGE_READ;
10174    case 3:
10175        return PAGE_READ;
10176    default:
10177        g_assert_not_reached();
10178    }
10179}
10180
10181static inline int
10182simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
10183{
10184    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
10185}
10186
10187/* Translate S2 section/page access permissions to protection flags
10188 *
10189 * @env:     CPUARMState
10190 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
10191 * @xn:      XN (execute-never) bits
10192 * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
10193 */
10194static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
10195{
10196    int prot = 0;
10197
10198    if (s2ap & 1) {
10199        prot |= PAGE_READ;
10200    }
10201    if (s2ap & 2) {
10202        prot |= PAGE_WRITE;
10203    }
10204
10205    if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
10206        switch (xn) {
10207        case 0:
10208            prot |= PAGE_EXEC;
10209            break;
10210        case 1:
10211            if (s1_is_el0) {
10212                prot |= PAGE_EXEC;
10213            }
10214            break;
10215        case 2:
10216            break;
10217        case 3:
10218            if (!s1_is_el0) {
10219                prot |= PAGE_EXEC;
10220            }
10221            break;
10222        default:
10223            g_assert_not_reached();
10224        }
10225    } else {
10226        if (!extract32(xn, 1, 1)) {
10227            if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
10228                prot |= PAGE_EXEC;
10229            }
10230        }
10231    }
10232    return prot;
10233}
10234
10235/* Translate section/page access permissions to protection flags
10236 *
10237 * @env:     CPUARMState
10238 * @mmu_idx: MMU index indicating required translation regime
10239 * @is_aa64: TRUE if AArch64
10240 * @ap:      The 2-bit simple AP (AP[2:1])
10241 * @ns:      NS (non-secure) bit
10242 * @xn:      XN (execute-never) bit
10243 * @pxn:     PXN (privileged execute-never) bit
10244 */
10245static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
10246                      int ap, int ns, int xn, int pxn)
10247{
10248    bool is_user = regime_is_user(env, mmu_idx);
10249    int prot_rw, user_rw;
10250    bool have_wxn;
10251    int wxn = 0;
10252
10253    assert(mmu_idx != ARMMMUIdx_Stage2);
10254
10255    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
10256    if (is_user) {
10257        prot_rw = user_rw;
10258    } else {
10259        if (user_rw && regime_is_pan(env, mmu_idx)) {
10260            /* PAN forbids data accesses but doesn't affect insn fetch */
10261            prot_rw = 0;
10262        } else {
10263            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
10264        }
10265    }
10266
10267    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
10268        return prot_rw;
10269    }
10270
10271    /* TODO have_wxn should be replaced with
10272     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
10273     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
10274     * compatible processors have EL2, which is required for [U]WXN.
10275     */
10276    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
10277
10278    if (have_wxn) {
10279        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
10280    }
10281
10282    if (is_aa64) {
10283        if (regime_has_2_ranges(mmu_idx) && !is_user) {
10284            xn = pxn || (user_rw & PAGE_WRITE);
10285        }
10286    } else if (arm_feature(env, ARM_FEATURE_V7)) {
10287        switch (regime_el(env, mmu_idx)) {
10288        case 1:
10289        case 3:
10290            if (is_user) {
10291                xn = xn || !(user_rw & PAGE_READ);
10292            } else {
10293                int uwxn = 0;
10294                if (have_wxn) {
10295                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
10296                }
10297                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
10298                     (uwxn && (user_rw & PAGE_WRITE));
10299            }
10300            break;
10301        case 2:
10302            break;
10303        }
10304    } else {
10305        xn = wxn = 0;
10306    }
10307
10308    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
10309        return prot_rw;
10310    }
10311    return prot_rw | PAGE_EXEC;
10312}
10313
10314static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
10315                                     uint32_t *table, uint32_t address)
10316{
10317    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
10318    TCR *tcr = regime_tcr(env, mmu_idx);
10319
10320    if (address & tcr->mask) {
10321        if (tcr->raw_tcr & TTBCR_PD1) {
10322            /* Translation table walk disabled for TTBR1 */
10323            return false;
10324        }
10325        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
10326    } else {
10327        if (tcr->raw_tcr & TTBCR_PD0) {
10328            /* Translation table walk disabled for TTBR0 */
10329            return false;
10330        }
10331        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
10332    }
10333    *table |= (address >> 18) & 0x3ffc;
10334    return true;
10335}
10336
10337/* Translate a S1 pagetable walk through S2 if needed.  */
10338static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
10339                               hwaddr addr, MemTxAttrs txattrs,
10340                               ARMMMUFaultInfo *fi)
10341{
10342    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
10343        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
10344        target_ulong s2size;
10345        hwaddr s2pa;
10346        int s2prot;
10347        int ret;
10348        ARMCacheAttrs cacheattrs = {};
10349
10350        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, ARMMMUIdx_Stage2,
10351                                 false,
10352                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
10353                                 &cacheattrs);
10354        if (ret) {
10355            assert(fi->type != ARMFault_None);
10356            fi->s2addr = addr;
10357            fi->stage2 = true;
10358            fi->s1ptw = true;
10359            return ~0;
10360        }
10361        if ((env->cp15.hcr_el2 & HCR_PTW) && (cacheattrs.attrs & 0xf0) == 0) {
10362            /*
10363             * PTW set and S1 walk touched S2 Device memory:
10364             * generate Permission fault.
10365             */
10366            fi->type = ARMFault_Permission;
10367            fi->s2addr = addr;
10368            fi->stage2 = true;
10369            fi->s1ptw = true;
10370            return ~0;
10371        }
10372        addr = s2pa;
10373    }
10374    return addr;
10375}
10376
10377/* All loads done in the course of a page table walk go through here. */
10378static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
10379                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
10380{
10381    ARMCPU *cpu = ARM_CPU(cs);
10382    CPUARMState *env = &cpu->env;
10383    MemTxAttrs attrs = {};
10384    MemTxResult result = MEMTX_OK;
10385    AddressSpace *as;
10386    uint32_t data;
10387
10388    attrs.secure = is_secure;
10389    as = arm_addressspace(cs, attrs);
10390    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
10391    if (fi->s1ptw) {
10392        return 0;
10393    }
10394    if (regime_translation_big_endian(env, mmu_idx)) {
10395        data = address_space_ldl_be(as, addr, attrs, &result);
10396    } else {
10397        data = address_space_ldl_le(as, addr, attrs, &result);
10398    }
10399    if (result == MEMTX_OK) {
10400        return data;
10401    }
10402    fi->type = ARMFault_SyncExternalOnWalk;
10403    fi->ea = arm_extabort_type(result);
10404    return 0;
10405}
10406
10407static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
10408                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
10409{
10410    ARMCPU *cpu = ARM_CPU(cs);
10411    CPUARMState *env = &cpu->env;
10412    MemTxAttrs attrs = {};
10413    MemTxResult result = MEMTX_OK;
10414    AddressSpace *as;
10415    uint64_t data;
10416
10417    attrs.secure = is_secure;
10418    as = arm_addressspace(cs, attrs);
10419    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
10420    if (fi->s1ptw) {
10421        return 0;
10422    }
10423    if (regime_translation_big_endian(env, mmu_idx)) {
10424        data = address_space_ldq_be(as, addr, attrs, &result);
10425    } else {
10426        data = address_space_ldq_le(as, addr, attrs, &result);
10427    }
10428    if (result == MEMTX_OK) {
10429        return data;
10430    }
10431    fi->type = ARMFault_SyncExternalOnWalk;
10432    fi->ea = arm_extabort_type(result);
10433    return 0;
10434}
10435
10436static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
10437                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
10438                             hwaddr *phys_ptr, int *prot,
10439                             target_ulong *page_size,
10440                             ARMMMUFaultInfo *fi)
10441{
10442    CPUState *cs = env_cpu(env);
10443    int level = 1;
10444    uint32_t table;
10445    uint32_t desc;
10446    int type;
10447    int ap;
10448    int domain = 0;
10449    int domain_prot;
10450    hwaddr phys_addr;
10451    uint32_t dacr;
10452
10453    /* Pagetable walk.  */
10454    /* Lookup l1 descriptor.  */
10455    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
10456        /* Section translation fault if page walk is disabled by PD0 or PD1 */
10457        fi->type = ARMFault_Translation;
10458        goto do_fault;
10459    }
10460    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10461                       mmu_idx, fi);
10462    if (fi->type != ARMFault_None) {
10463        goto do_fault;
10464    }
10465    type = (desc & 3);
10466    domain = (desc >> 5) & 0x0f;
10467    if (regime_el(env, mmu_idx) == 1) {
10468        dacr = env->cp15.dacr_ns;
10469    } else {
10470        dacr = env->cp15.dacr_s;
10471    }
10472    domain_prot = (dacr >> (domain * 2)) & 3;
10473    if (type == 0) {
10474        /* Section translation fault.  */
10475        fi->type = ARMFault_Translation;
10476        goto do_fault;
10477    }
10478    if (type != 2) {
10479        level = 2;
10480    }
10481    if (domain_prot == 0 || domain_prot == 2) {
10482        fi->type = ARMFault_Domain;
10483        goto do_fault;
10484    }
10485    if (type == 2) {
10486        /* 1Mb section.  */
10487        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
10488        ap = (desc >> 10) & 3;
10489        *page_size = 1024 * 1024;
10490    } else {
10491        /* Lookup l2 entry.  */
10492        if (type == 1) {
10493            /* Coarse pagetable.  */
10494            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
10495        } else {
10496            /* Fine pagetable.  */
10497            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
10498        }
10499        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10500                           mmu_idx, fi);
10501        if (fi->type != ARMFault_None) {
10502            goto do_fault;
10503        }
10504        switch (desc & 3) {
10505        case 0: /* Page translation fault.  */
10506            fi->type = ARMFault_Translation;
10507            goto do_fault;
10508        case 1: /* 64k page.  */
10509            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
10510            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
10511            *page_size = 0x10000;
10512            break;
10513        case 2: /* 4k page.  */
10514            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10515            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
10516            *page_size = 0x1000;
10517            break;
10518        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
10519            if (type == 1) {
10520                /* ARMv6/XScale extended small page format */
10521                if (arm_feature(env, ARM_FEATURE_XSCALE)
10522                    || arm_feature(env, ARM_FEATURE_V6)) {
10523                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10524                    *page_size = 0x1000;
10525                } else {
10526                    /* UNPREDICTABLE in ARMv5; we choose to take a
10527                     * page translation fault.
10528                     */
10529                    fi->type = ARMFault_Translation;
10530                    goto do_fault;
10531                }
10532            } else {
10533                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
10534                *page_size = 0x400;
10535            }
10536            ap = (desc >> 4) & 3;
10537            break;
10538        default:
10539            /* Never happens, but compiler isn't smart enough to tell.  */
10540            abort();
10541        }
10542    }
10543    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
10544    *prot |= *prot ? PAGE_EXEC : 0;
10545    if (!(*prot & (1 << access_type))) {
10546        /* Access permission fault.  */
10547        fi->type = ARMFault_Permission;
10548        goto do_fault;
10549    }
10550    *phys_ptr = phys_addr;
10551    return false;
10552do_fault:
10553    fi->domain = domain;
10554    fi->level = level;
10555    return true;
10556}
10557
10558static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
10559                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
10560                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
10561                             target_ulong *page_size, ARMMMUFaultInfo *fi)
10562{
10563    CPUState *cs = env_cpu(env);
10564    ARMCPU *cpu = env_archcpu(env);
10565    int level = 1;
10566    uint32_t table;
10567    uint32_t desc;
10568    uint32_t xn;
10569    uint32_t pxn = 0;
10570    int type;
10571    int ap;
10572    int domain = 0;
10573    int domain_prot;
10574    hwaddr phys_addr;
10575    uint32_t dacr;
10576    bool ns;
10577
10578    /* Pagetable walk.  */
10579    /* Lookup l1 descriptor.  */
10580    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
10581        /* Section translation fault if page walk is disabled by PD0 or PD1 */
10582        fi->type = ARMFault_Translation;
10583        goto do_fault;
10584    }
10585    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10586                       mmu_idx, fi);
10587    if (fi->type != ARMFault_None) {
10588        goto do_fault;
10589    }
10590    type = (desc & 3);
10591    if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
10592        /* Section translation fault, or attempt to use the encoding
10593         * which is Reserved on implementations without PXN.
10594         */
10595        fi->type = ARMFault_Translation;
10596        goto do_fault;
10597    }
10598    if ((type == 1) || !(desc & (1 << 18))) {
10599        /* Page or Section.  */
10600        domain = (desc >> 5) & 0x0f;
10601    }
10602    if (regime_el(env, mmu_idx) == 1) {
10603        dacr = env->cp15.dacr_ns;
10604    } else {
10605        dacr = env->cp15.dacr_s;
10606    }
10607    if (type == 1) {
10608        level = 2;
10609    }
10610    domain_prot = (dacr >> (domain * 2)) & 3;
10611    if (domain_prot == 0 || domain_prot == 2) {
10612        /* Section or Page domain fault */
10613        fi->type = ARMFault_Domain;
10614        goto do_fault;
10615    }
10616    if (type != 1) {
10617        if (desc & (1 << 18)) {
10618            /* Supersection.  */
10619            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
10620            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
10621            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
10622            *page_size = 0x1000000;
10623        } else {
10624            /* Section.  */
10625            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
10626            *page_size = 0x100000;
10627        }
10628        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
10629        xn = desc & (1 << 4);
10630        pxn = desc & 1;
10631        ns = extract32(desc, 19, 1);
10632    } else {
10633        if (cpu_isar_feature(aa32_pxn, cpu)) {
10634            pxn = (desc >> 2) & 1;
10635        }
10636        ns = extract32(desc, 3, 1);
10637        /* Lookup l2 entry.  */
10638        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
10639        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
10640                           mmu_idx, fi);
10641        if (fi->type != ARMFault_None) {
10642            goto do_fault;
10643        }
10644        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
10645        switch (desc & 3) {
10646        case 0: /* Page translation fault.  */
10647            fi->type = ARMFault_Translation;
10648            goto do_fault;
10649        case 1: /* 64k page.  */
10650            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
10651            xn = desc & (1 << 15);
10652            *page_size = 0x10000;
10653            break;
10654        case 2: case 3: /* 4k page.  */
10655            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
10656            xn = desc & 1;
10657            *page_size = 0x1000;
10658            break;
10659        default:
10660            /* Never happens, but compiler isn't smart enough to tell.  */
10661            abort();
10662        }
10663    }
10664    if (domain_prot == 3) {
10665        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
10666    } else {
10667        if (pxn && !regime_is_user(env, mmu_idx)) {
10668            xn = 1;
10669        }
10670        if (xn && access_type == MMU_INST_FETCH) {
10671            fi->type = ARMFault_Permission;
10672            goto do_fault;
10673        }
10674
10675        if (arm_feature(env, ARM_FEATURE_V6K) &&
10676                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
10677            /* The simplified model uses AP[0] as an access control bit.  */
10678            if ((ap & 1) == 0) {
10679                /* Access flag fault.  */
10680                fi->type = ARMFault_AccessFlag;
10681                goto do_fault;
10682            }
10683            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
10684        } else {
10685            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
10686        }
10687        if (*prot && !xn) {
10688            *prot |= PAGE_EXEC;
10689        }
10690        if (!(*prot & (1 << access_type))) {
10691            /* Access permission fault.  */
10692            fi->type = ARMFault_Permission;
10693            goto do_fault;
10694        }
10695    }
10696    if (ns) {
10697        /* The NS bit will (as required by the architecture) have no effect if
10698         * the CPU doesn't support TZ or this is a non-secure translation
10699         * regime, because the attribute will already be non-secure.
10700         */
10701        attrs->secure = false;
10702    }
10703    *phys_ptr = phys_addr;
10704    return false;
10705do_fault:
10706    fi->domain = domain;
10707    fi->level = level;
10708    return true;
10709}
10710
10711/*
10712 * check_s2_mmu_setup
10713 * @cpu:        ARMCPU
10714 * @is_aa64:    True if the translation regime is in AArch64 state
10715 * @startlevel: Suggested starting level
10716 * @inputsize:  Bitsize of IPAs
10717 * @stride:     Page-table stride (See the ARM ARM)
10718 *
10719 * Returns true if the suggested S2 translation parameters are OK and
10720 * false otherwise.
10721 */
10722static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
10723                               int inputsize, int stride)
10724{
10725    const int grainsize = stride + 3;
10726    int startsizecheck;
10727
10728    /* Negative levels are never allowed.  */
10729    if (level < 0) {
10730        return false;
10731    }
10732
10733    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
10734    if (startsizecheck < 1 || startsizecheck > stride + 4) {
10735        return false;
10736    }
10737
10738    if (is_aa64) {
10739        CPUARMState *env = &cpu->env;
10740        unsigned int pamax = arm_pamax(cpu);
10741
10742        switch (stride) {
10743        case 13: /* 64KB Pages.  */
10744            if (level == 0 || (level == 1 && pamax <= 42)) {
10745                return false;
10746            }
10747            break;
10748        case 11: /* 16KB Pages.  */
10749            if (level == 0 || (level == 1 && pamax <= 40)) {
10750                return false;
10751            }
10752            break;
10753        case 9: /* 4KB Pages.  */
10754            if (level == 0 && pamax <= 42) {
10755                return false;
10756            }
10757            break;
10758        default:
10759            g_assert_not_reached();
10760        }
10761
10762        /* Inputsize checks.  */
10763        if (inputsize > pamax &&
10764            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
10765            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
10766            return false;
10767        }
10768    } else {
10769        /* AArch32 only supports 4KB pages. Assert on that.  */
10770        assert(stride == 9);
10771
10772        if (level == 0) {
10773            return false;
10774        }
10775    }
10776    return true;
10777}
10778
10779/* Translate from the 4-bit stage 2 representation of
10780 * memory attributes (without cache-allocation hints) to
10781 * the 8-bit representation of the stage 1 MAIR registers
10782 * (which includes allocation hints).
10783 *
10784 * ref: shared/translation/attrs/S2AttrDecode()
10785 *      .../S2ConvertAttrsHints()
10786 */
10787static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
10788{
10789    uint8_t hiattr = extract32(s2attrs, 2, 2);
10790    uint8_t loattr = extract32(s2attrs, 0, 2);
10791    uint8_t hihint = 0, lohint = 0;
10792
10793    if (hiattr != 0) { /* normal memory */
10794        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
10795            hiattr = loattr = 1; /* non-cacheable */
10796        } else {
10797            if (hiattr != 1) { /* Write-through or write-back */
10798                hihint = 3; /* RW allocate */
10799            }
10800            if (loattr != 1) { /* Write-through or write-back */
10801                lohint = 3; /* RW allocate */
10802            }
10803        }
10804    }
10805
10806    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
10807}
10808#endif /* !CONFIG_USER_ONLY */
10809
10810static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
10811{
10812    if (regime_has_2_ranges(mmu_idx)) {
10813        return extract64(tcr, 37, 2);
10814    } else if (mmu_idx == ARMMMUIdx_Stage2) {
10815        return 0; /* VTCR_EL2 */
10816    } else {
10817        /* Replicate the single TBI bit so we always have 2 bits.  */
10818        return extract32(tcr, 20, 1) * 3;
10819    }
10820}
10821
10822static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
10823{
10824    if (regime_has_2_ranges(mmu_idx)) {
10825        return extract64(tcr, 51, 2);
10826    } else if (mmu_idx == ARMMMUIdx_Stage2) {
10827        return 0; /* VTCR_EL2 */
10828    } else {
10829        /* Replicate the single TBID bit so we always have 2 bits.  */
10830        return extract32(tcr, 29, 1) * 3;
10831    }
10832}
10833
10834static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
10835{
10836    if (regime_has_2_ranges(mmu_idx)) {
10837        return extract64(tcr, 57, 2);
10838    } else {
10839        /* Replicate the single TCMA bit so we always have 2 bits.  */
10840        return extract32(tcr, 30, 1) * 3;
10841    }
10842}
10843
10844ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
10845                                   ARMMMUIdx mmu_idx, bool data)
10846{
10847    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
10848    bool epd, hpd, using16k, using64k;
10849    int select, tsz, tbi;
10850
10851    if (!regime_has_2_ranges(mmu_idx)) {
10852        select = 0;
10853        tsz = extract32(tcr, 0, 6);
10854        using64k = extract32(tcr, 14, 1);
10855        using16k = extract32(tcr, 15, 1);
10856        if (mmu_idx == ARMMMUIdx_Stage2) {
10857            /* VTCR_EL2 */
10858            hpd = false;
10859        } else {
10860            hpd = extract32(tcr, 24, 1);
10861        }
10862        epd = false;
10863    } else {
10864        /*
10865         * Bit 55 is always between the two regions, and is canonical for
10866         * determining if address tagging is enabled.
10867         */
10868        select = extract64(va, 55, 1);
10869        if (!select) {
10870            tsz = extract32(tcr, 0, 6);
10871            epd = extract32(tcr, 7, 1);
10872            using64k = extract32(tcr, 14, 1);
10873            using16k = extract32(tcr, 15, 1);
10874            hpd = extract64(tcr, 41, 1);
10875        } else {
10876            int tg = extract32(tcr, 30, 2);
10877            using16k = tg == 1;
10878            using64k = tg == 3;
10879            tsz = extract32(tcr, 16, 6);
10880            epd = extract32(tcr, 23, 1);
10881            hpd = extract64(tcr, 42, 1);
10882        }
10883    }
10884    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
10885    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */
10886
10887    /* Present TBI as a composite with TBID.  */
10888    tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
10889    if (!data) {
10890        tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
10891    }
10892    tbi = (tbi >> select) & 1;
10893
10894    return (ARMVAParameters) {
10895        .tsz = tsz,
10896        .select = select,
10897        .tbi = tbi,
10898        .epd = epd,
10899        .hpd = hpd,
10900        .using16k = using16k,
10901        .using64k = using64k,
10902    };
10903}
10904
10905#ifndef CONFIG_USER_ONLY
10906static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
10907                                          ARMMMUIdx mmu_idx)
10908{
10909    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
10910    uint32_t el = regime_el(env, mmu_idx);
10911    int select, tsz;
10912    bool epd, hpd;
10913
10914    if (mmu_idx == ARMMMUIdx_Stage2) {
10915        /* VTCR */
10916        bool sext = extract32(tcr, 4, 1);
10917        bool sign = extract32(tcr, 3, 1);
10918
10919        /*
10920         * If the sign-extend bit is not the same as t0sz[3], the result
10921         * is unpredictable. Flag this as a guest error.
10922         */
10923        if (sign != sext) {
10924            qemu_log_mask(LOG_GUEST_ERROR,
10925                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
10926        }
10927        tsz = sextract32(tcr, 0, 4) + 8;
10928        select = 0;
10929        hpd = false;
10930        epd = false;
10931    } else if (el == 2) {
10932        /* HTCR */
10933        tsz = extract32(tcr, 0, 3);
10934        select = 0;
10935        hpd = extract64(tcr, 24, 1);
10936        epd = false;
10937    } else {
10938        int t0sz = extract32(tcr, 0, 3);
10939        int t1sz = extract32(tcr, 16, 3);
10940
10941        if (t1sz == 0) {
10942            select = va > (0xffffffffu >> t0sz);
10943        } else {
10944            /* Note that we will detect errors later.  */
10945            select = va >= ~(0xffffffffu >> t1sz);
10946        }
10947        if (!select) {
10948            tsz = t0sz;
10949            epd = extract32(tcr, 7, 1);
10950            hpd = extract64(tcr, 41, 1);
10951        } else {
10952            tsz = t1sz;
10953            epd = extract32(tcr, 23, 1);
10954            hpd = extract64(tcr, 42, 1);
10955        }
10956        /* For aarch32, hpd0 is not enabled without t2e as well.  */
10957        hpd &= extract32(tcr, 6, 1);
10958    }
10959
10960    return (ARMVAParameters) {
10961        .tsz = tsz,
10962        .select = select,
10963        .epd = epd,
10964        .hpd = hpd,
10965    };
10966}
10967
10968/**
10969 * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
10970 *
10971 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
10972 * prot and page_size may not be filled in, and the populated fsr value provides
10973 * information on why the translation aborted, in the format of a long-format
10974 * DFSR/IFSR fault register, with the following caveats:
10975 *  * the WnR bit is never set (the caller must do this).
10976 *
10977 * @env: CPUARMState
10978 * @address: virtual address to get physical address for
10979 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
10980 * @mmu_idx: MMU index indicating required translation regime
10981 * @s1_is_el0: if @mmu_idx is ARMMMUIdx_Stage2 (so this is a stage 2 page table
10982 *             walk), must be true if this is stage 2 of a stage 1+2 walk for an
10983 *             EL0 access). If @mmu_idx is anything else, @s1_is_el0 is ignored.
10984 * @phys_ptr: set to the physical address corresponding to the virtual address
10985 * @attrs: set to the memory transaction attributes to use
10986 * @prot: set to the permissions for the page containing phys_ptr
10987 * @page_size_ptr: set to the size of the page containing phys_ptr
10988 * @fi: set to fault info if the translation fails
10989 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
10990 */
10991static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
10992                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
10993                               bool s1_is_el0,
10994                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
10995                               target_ulong *page_size_ptr,
10996                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
10997{
10998    ARMCPU *cpu = env_archcpu(env);
10999    CPUState *cs = CPU(cpu);
11000    /* Read an LPAE long-descriptor translation table. */
11001    ARMFaultType fault_type = ARMFault_Translation;
11002    uint32_t level;
11003    ARMVAParameters param;
11004    uint64_t ttbr;
11005    hwaddr descaddr, indexmask, indexmask_grainsize;
11006    uint32_t tableattrs;
11007    target_ulong page_size;
11008    uint32_t attrs;
11009    int32_t stride;
11010    int addrsize, inputsize;
11011    TCR *tcr = regime_tcr(env, mmu_idx);
11012    int ap, ns, xn, pxn;
11013    uint32_t el = regime_el(env, mmu_idx);
11014    uint64_t descaddrmask;
11015    bool aarch64 = arm_el_is_aa64(env, el);
11016    bool guarded = false;
11017
11018    /* TODO: This code does not support shareability levels. */
11019    if (aarch64) {
11020        param = aa64_va_parameters(env, address, mmu_idx,
11021                                   access_type != MMU_INST_FETCH);
11022        level = 0;
11023        addrsize = 64 - 8 * param.tbi;
11024        inputsize = 64 - param.tsz;
11025    } else {
11026        param = aa32_va_parameters(env, address, mmu_idx);
11027        level = 1;
11028        addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32);
11029        inputsize = addrsize - param.tsz;
11030    }
11031
11032    /*
11033     * We determined the region when collecting the parameters, but we
11034     * have not yet validated that the address is valid for the region.
11035     * Extract the top bits and verify that they all match select.
11036     *
11037     * For aa32, if inputsize == addrsize, then we have selected the
11038     * region by exclusion in aa32_va_parameters and there is no more
11039     * validation to do here.
11040     */
11041    if (inputsize < addrsize) {
11042        target_ulong top_bits = sextract64(address, inputsize,
11043                                           addrsize - inputsize);
11044        if (-top_bits != param.select) {
11045            /* The gap between the two regions is a Translation fault */
11046            fault_type = ARMFault_Translation;
11047            goto do_fault;
11048        }
11049    }
11050
11051    if (param.using64k) {
11052        stride = 13;
11053    } else if (param.using16k) {
11054        stride = 11;
11055    } else {
11056        stride = 9;
11057    }
11058
11059    /* Note that QEMU ignores shareability and cacheability attributes,
11060     * so we don't need to do anything with the SH, ORGN, IRGN fields
11061     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
11062     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
11063     * implement any ASID-like capability so we can ignore it (instead
11064     * we will always flush the TLB any time the ASID is changed).
11065     */
11066    ttbr = regime_ttbr(env, mmu_idx, param.select);
11067
11068    /* Here we should have set up all the parameters for the translation:
11069     * inputsize, ttbr, epd, stride, tbi
11070     */
11071
11072    if (param.epd) {
11073        /* Translation table walk disabled => Translation fault on TLB miss
11074         * Note: This is always 0 on 64-bit EL2 and EL3.
11075         */
11076        goto do_fault;
11077    }
11078
11079    if (mmu_idx != ARMMMUIdx_Stage2) {
11080        /* The starting level depends on the virtual address size (which can
11081         * be up to 48 bits) and the translation granule size. It indicates
11082         * the number of strides (stride bits at a time) needed to
11083         * consume the bits of the input address. In the pseudocode this is:
11084         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
11085         * where their 'inputsize' is our 'inputsize', 'grainsize' is
11086         * our 'stride + 3' and 'stride' is our 'stride'.
11087         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
11088         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
11089         * = 4 - (inputsize - 4) / stride;
11090         */
11091        level = 4 - (inputsize - 4) / stride;
11092    } else {
11093        /* For stage 2 translations the starting level is specified by the
11094         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
11095         */
11096        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
11097        uint32_t startlevel;
11098        bool ok;
11099
11100        if (!aarch64 || stride == 9) {
11101            /* AArch32 or 4KB pages */
11102            startlevel = 2 - sl0;
11103        } else {
11104            /* 16KB or 64KB pages */
11105            startlevel = 3 - sl0;
11106        }
11107
11108        /* Check that the starting level is valid. */
11109        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
11110                                inputsize, stride);
11111        if (!ok) {
11112            fault_type = ARMFault_Translation;
11113            goto do_fault;
11114        }
11115        level = startlevel;
11116    }
11117
11118    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
11119    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
11120
11121    /* Now we can extract the actual base address from the TTBR */
11122    descaddr = extract64(ttbr, 0, 48);
11123    /*
11124     * We rely on this masking to clear the RES0 bits at the bottom of the TTBR
11125     * and also to mask out CnP (bit 0) which could validly be non-zero.
11126     */
11127    descaddr &= ~indexmask;
11128
11129    /* The address field in the descriptor goes up to bit 39 for ARMv7
11130     * but up to bit 47 for ARMv8, but we use the descaddrmask
11131     * up to bit 39 for AArch32, because we don't need other bits in that case
11132     * to construct next descriptor address (anyway they should be all zeroes).
11133     */
11134    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
11135                   ~indexmask_grainsize;
11136
11137    /* Secure accesses start with the page table in secure memory and
11138     * can be downgraded to non-secure at any step. Non-secure accesses
11139     * remain non-secure. We implement this by just ORing in the NSTable/NS
11140     * bits at each step.
11141     */
11142    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
11143    for (;;) {
11144        uint64_t descriptor;
11145        bool nstable;
11146
11147        descaddr |= (address >> (stride * (4 - level))) & indexmask;
11148        descaddr &= ~7ULL;
11149        nstable = extract32(tableattrs, 4, 1);
11150        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
11151        if (fi->type != ARMFault_None) {
11152            goto do_fault;
11153        }
11154
11155        if (!(descriptor & 1) ||
11156            (!(descriptor & 2) && (level == 3))) {
11157            /* Invalid, or the Reserved level 3 encoding */
11158            goto do_fault;
11159        }
11160        descaddr = descriptor & descaddrmask;
11161
11162        if ((descriptor & 2) && (level < 3)) {
11163            /* Table entry. The top five bits are attributes which may
11164             * propagate down through lower levels of the table (and
11165             * which are all arranged so that 0 means "no effect", so
11166             * we can gather them up by ORing in the bits at each level).
11167             */
11168            tableattrs |= extract64(descriptor, 59, 5);
11169            level++;
11170            indexmask = indexmask_grainsize;
11171            continue;
11172        }
11173        /* Block entry at level 1 or 2, or page entry at level 3.
11174         * These are basically the same thing, although the number
11175         * of bits we pull in from the vaddr varies.
11176         */
11177        page_size = (1ULL << ((stride * (4 - level)) + 3));
11178        descaddr |= (address & (page_size - 1));
11179        /* Extract attributes from the descriptor */
11180        attrs = extract64(descriptor, 2, 10)
11181            | (extract64(descriptor, 52, 12) << 10);
11182
11183        if (mmu_idx == ARMMMUIdx_Stage2) {
11184            /* Stage 2 table descriptors do not include any attribute fields */
11185            break;
11186        }
11187        /* Merge in attributes from table descriptors */
11188        attrs |= nstable << 3; /* NS */
11189        guarded = extract64(descriptor, 50, 1);  /* GP */
11190        if (param.hpd) {
11191            /* HPD disables all the table attributes except NSTable.  */
11192            break;
11193        }
11194        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
11195        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
11196         * means "force PL1 access only", which means forcing AP[1] to 0.
11197         */
11198        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
11199        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
11200        break;
11201    }
11202    /* Here descaddr is the final physical address, and attributes
11203     * are all in attrs.
11204     */
11205    fault_type = ARMFault_AccessFlag;
11206    if ((attrs & (1 << 8)) == 0) {
11207        /* Access flag */
11208        goto do_fault;
11209    }
11210
11211    ap = extract32(attrs, 4, 2);
11212
11213    if (mmu_idx == ARMMMUIdx_Stage2) {
11214        ns = true;
11215        xn = extract32(attrs, 11, 2);
11216        *prot = get_S2prot(env, ap, xn, s1_is_el0);
11217    } else {
11218        ns = extract32(attrs, 3, 1);
11219        xn = extract32(attrs, 12, 1);
11220        pxn = extract32(attrs, 11, 1);
11221        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
11222    }
11223
11224    fault_type = ARMFault_Permission;
11225    if (!(*prot & (1 << access_type))) {
11226        goto do_fault;
11227    }
11228
11229    if (ns) {
11230        /* The NS bit will (as required by the architecture) have no effect if
11231         * the CPU doesn't support TZ or this is a non-secure translation
11232         * regime, because the attribute will already be non-secure.
11233         */
11234        txattrs->secure = false;
11235    }
11236    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
11237    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
11238        arm_tlb_bti_gp(txattrs) = true;
11239    }
11240
11241    if (mmu_idx == ARMMMUIdx_Stage2) {
11242        cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
11243    } else {
11244        /* Index into MAIR registers for cache attributes */
11245        uint8_t attrindx = extract32(attrs, 0, 3);
11246        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
11247        assert(attrindx <= 7);
11248        cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
11249    }
11250    cacheattrs->shareability = extract32(attrs, 6, 2);
11251
11252    *phys_ptr = descaddr;
11253    *page_size_ptr = page_size;
11254    return false;
11255
11256do_fault:
11257    fi->type = fault_type;
11258    fi->level = level;
11259    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
11260    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2);
11261    return true;
11262}
11263
11264static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
11265                                                ARMMMUIdx mmu_idx,
11266                                                int32_t address, int *prot)
11267{
11268    if (!arm_feature(env, ARM_FEATURE_M)) {
11269        *prot = PAGE_READ | PAGE_WRITE;
11270        switch (address) {
11271        case 0xF0000000 ... 0xFFFFFFFF:
11272            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
11273                /* hivecs execing is ok */
11274                *prot |= PAGE_EXEC;
11275            }
11276            break;
11277        case 0x00000000 ... 0x7FFFFFFF:
11278            *prot |= PAGE_EXEC;
11279            break;
11280        }
11281    } else {
11282        /* Default system address map for M profile cores.
11283         * The architecture specifies which regions are execute-never;
11284         * at the MPU level no other checks are defined.
11285         */
11286        switch (address) {
11287        case 0x00000000 ... 0x1fffffff: /* ROM */
11288        case 0x20000000 ... 0x3fffffff: /* SRAM */
11289        case 0x60000000 ... 0x7fffffff: /* RAM */
11290        case 0x80000000 ... 0x9fffffff: /* RAM */
11291            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
11292            break;
11293        case 0x40000000 ... 0x5fffffff: /* Peripheral */
11294        case 0xa0000000 ... 0xbfffffff: /* Device */
11295        case 0xc0000000 ... 0xdfffffff: /* Device */
11296        case 0xe0000000 ... 0xffffffff: /* System */
11297            *prot = PAGE_READ | PAGE_WRITE;
11298            break;
11299        default:
11300            g_assert_not_reached();
11301        }
11302    }
11303}
11304
11305static bool pmsav7_use_background_region(ARMCPU *cpu,
11306                                         ARMMMUIdx mmu_idx, bool is_user)
11307{
11308    /* Return true if we should use the default memory map as a
11309     * "background" region if there are no hits against any MPU regions.
11310     */
11311    CPUARMState *env = &cpu->env;
11312
11313    if (is_user) {
11314        return false;
11315    }
11316
11317    if (arm_feature(env, ARM_FEATURE_M)) {
11318        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
11319            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
11320    } else {
11321        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
11322    }
11323}
11324
11325static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
11326{
11327    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
11328    return arm_feature(env, ARM_FEATURE_M) &&
11329        extract32(address, 20, 12) == 0xe00;
11330}
11331
11332static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
11333{
11334    /* True if address is in the M profile system region
11335     * 0xe0000000 - 0xffffffff
11336     */
11337    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
11338}
11339
11340static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
11341                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11342                                 hwaddr *phys_ptr, int *prot,
11343                                 target_ulong *page_size,
11344                                 ARMMMUFaultInfo *fi)
11345{
11346    ARMCPU *cpu = env_archcpu(env);
11347    int n;
11348    bool is_user = regime_is_user(env, mmu_idx);
11349
11350    *phys_ptr = address;
11351    *page_size = TARGET_PAGE_SIZE;
11352    *prot = 0;
11353
11354    if (regime_translation_disabled(env, mmu_idx) ||
11355        m_is_ppb_region(env, address)) {
11356        /* MPU disabled or M profile PPB access: use default memory map.
11357         * The other case which uses the default memory map in the
11358         * v7M ARM ARM pseudocode is exception vector reads from the vector
11359         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
11360         * which always does a direct read using address_space_ldl(), rather
11361         * than going via this function, so we don't need to check that here.
11362         */
11363        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11364    } else { /* MPU enabled */
11365        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
11366            /* region search */
11367            uint32_t base = env->pmsav7.drbar[n];
11368            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
11369            uint32_t rmask;
11370            bool srdis = false;
11371
11372            if (!(env->pmsav7.drsr[n] & 0x1)) {
11373                continue;
11374            }
11375
11376            if (!rsize) {
11377                qemu_log_mask(LOG_GUEST_ERROR,
11378                              "DRSR[%d]: Rsize field cannot be 0\n", n);
11379                continue;
11380            }
11381            rsize++;
11382            rmask = (1ull << rsize) - 1;
11383
11384            if (base & rmask) {
11385                qemu_log_mask(LOG_GUEST_ERROR,
11386                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
11387                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
11388                              n, base, rmask);
11389                continue;
11390            }
11391
11392            if (address < base || address > base + rmask) {
11393                /*
11394                 * Address not in this region. We must check whether the
11395                 * region covers addresses in the same page as our address.
11396                 * In that case we must not report a size that covers the
11397                 * whole page for a subsequent hit against a different MPU
11398                 * region or the background region, because it would result in
11399                 * incorrect TLB hits for subsequent accesses to addresses that
11400                 * are in this MPU region.
11401                 */
11402                if (ranges_overlap(base, rmask,
11403                                   address & TARGET_PAGE_MASK,
11404                                   TARGET_PAGE_SIZE)) {
11405                    *page_size = 1;
11406                }
11407                continue;
11408            }
11409
11410            /* Region matched */
11411
11412            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
11413                int i, snd;
11414                uint32_t srdis_mask;
11415
11416                rsize -= 3; /* sub region size (power of 2) */
11417                snd = ((address - base) >> rsize) & 0x7;
11418                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
11419
11420                srdis_mask = srdis ? 0x3 : 0x0;
11421                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
11422                    /* This will check in groups of 2, 4 and then 8, whether
11423                     * the subregion bits are consistent. rsize is incremented
11424                     * back up to give the region size, considering consistent
11425                     * adjacent subregions as one region. Stop testing if rsize
11426                     * is already big enough for an entire QEMU page.
11427                     */
11428                    int snd_rounded = snd & ~(i - 1);
11429                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
11430                                                     snd_rounded + 8, i);
11431                    if (srdis_mask ^ srdis_multi) {
11432                        break;
11433                    }
11434                    srdis_mask = (srdis_mask << i) | srdis_mask;
11435                    rsize++;
11436                }
11437            }
11438            if (srdis) {
11439                continue;
11440            }
11441            if (rsize < TARGET_PAGE_BITS) {
11442                *page_size = 1 << rsize;
11443            }
11444            break;
11445        }
11446
11447        if (n == -1) { /* no hits */
11448            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
11449                /* background fault */
11450                fi->type = ARMFault_Background;
11451                return true;
11452            }
11453            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11454        } else { /* a MPU hit! */
11455            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
11456            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
11457
11458            if (m_is_system_region(env, address)) {
11459                /* System space is always execute never */
11460                xn = 1;
11461            }
11462
11463            if (is_user) { /* User mode AP bit decoding */
11464                switch (ap) {
11465                case 0:
11466                case 1:
11467                case 5:
11468                    break; /* no access */
11469                case 3:
11470                    *prot |= PAGE_WRITE;
11471                    /* fall through */
11472                case 2:
11473                case 6:
11474                    *prot |= PAGE_READ | PAGE_EXEC;
11475                    break;
11476                case 7:
11477                    /* for v7M, same as 6; for R profile a reserved value */
11478                    if (arm_feature(env, ARM_FEATURE_M)) {
11479                        *prot |= PAGE_READ | PAGE_EXEC;
11480                        break;
11481                    }
11482                    /* fall through */
11483                default:
11484                    qemu_log_mask(LOG_GUEST_ERROR,
11485                                  "DRACR[%d]: Bad value for AP bits: 0x%"
11486                                  PRIx32 "\n", n, ap);
11487                }
11488            } else { /* Priv. mode AP bits decoding */
11489                switch (ap) {
11490                case 0:
11491                    break; /* no access */
11492                case 1:
11493                case 2:
11494                case 3:
11495                    *prot |= PAGE_WRITE;
11496                    /* fall through */
11497                case 5:
11498                case 6:
11499                    *prot |= PAGE_READ | PAGE_EXEC;
11500                    break;
11501                case 7:
11502                    /* for v7M, same as 6; for R profile a reserved value */
11503                    if (arm_feature(env, ARM_FEATURE_M)) {
11504                        *prot |= PAGE_READ | PAGE_EXEC;
11505                        break;
11506                    }
11507                    /* fall through */
11508                default:
11509                    qemu_log_mask(LOG_GUEST_ERROR,
11510                                  "DRACR[%d]: Bad value for AP bits: 0x%"
11511                                  PRIx32 "\n", n, ap);
11512                }
11513            }
11514
11515            /* execute never */
11516            if (xn) {
11517                *prot &= ~PAGE_EXEC;
11518            }
11519        }
11520    }
11521
11522    fi->type = ARMFault_Permission;
11523    fi->level = 1;
11524    return !(*prot & (1 << access_type));
11525}
11526
11527static bool v8m_is_sau_exempt(CPUARMState *env,
11528                              uint32_t address, MMUAccessType access_type)
11529{
11530    /* The architecture specifies that certain address ranges are
11531     * exempt from v8M SAU/IDAU checks.
11532     */
11533    return
11534        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
11535        (address >= 0xe0000000 && address <= 0xe0002fff) ||
11536        (address >= 0xe000e000 && address <= 0xe000efff) ||
11537        (address >= 0xe002e000 && address <= 0xe002efff) ||
11538        (address >= 0xe0040000 && address <= 0xe0041fff) ||
11539        (address >= 0xe00ff000 && address <= 0xe00fffff);
11540}
11541
11542void v8m_security_lookup(CPUARMState *env, uint32_t address,
11543                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
11544                                V8M_SAttributes *sattrs)
11545{
11546    /* Look up the security attributes for this address. Compare the
11547     * pseudocode SecurityCheck() function.
11548     * We assume the caller has zero-initialized *sattrs.
11549     */
11550    ARMCPU *cpu = env_archcpu(env);
11551    int r;
11552    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
11553    int idau_region = IREGION_NOTVALID;
11554    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
11555    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
11556
11557    if (cpu->idau) {
11558        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
11559        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
11560
11561        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
11562                   &idau_nsc);
11563    }
11564
11565    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
11566        /* 0xf0000000..0xffffffff is always S for insn fetches */
11567        return;
11568    }
11569
11570    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
11571        sattrs->ns = !regime_is_secure(env, mmu_idx);
11572        return;
11573    }
11574
11575    if (idau_region != IREGION_NOTVALID) {
11576        sattrs->irvalid = true;
11577        sattrs->iregion = idau_region;
11578    }
11579
11580    switch (env->sau.ctrl & 3) {
11581    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
11582        break;
11583    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
11584        sattrs->ns = true;
11585        break;
11586    default: /* SAU.ENABLE == 1 */
11587        for (r = 0; r < cpu->sau_sregion; r++) {
11588            if (env->sau.rlar[r] & 1) {
11589                uint32_t base = env->sau.rbar[r] & ~0x1f;
11590                uint32_t limit = env->sau.rlar[r] | 0x1f;
11591
11592                if (base <= address && limit >= address) {
11593                    if (base > addr_page_base || limit < addr_page_limit) {
11594                        sattrs->subpage = true;
11595                    }
11596                    if (sattrs->srvalid) {
11597                        /* If we hit in more than one region then we must report
11598                         * as Secure, not NS-Callable, with no valid region
11599                         * number info.
11600                         */
11601                        sattrs->ns = false;
11602                        sattrs->nsc = false;
11603                        sattrs->sregion = 0;
11604                        sattrs->srvalid = false;
11605                        break;
11606                    } else {
11607                        if (env->sau.rlar[r] & 2) {
11608                            sattrs->nsc = true;
11609                        } else {
11610                            sattrs->ns = true;
11611                        }
11612                        sattrs->srvalid = true;
11613                        sattrs->sregion = r;
11614                    }
11615                } else {
11616                    /*
11617                     * Address not in this region. We must check whether the
11618                     * region covers addresses in the same page as our address.
11619                     * In that case we must not report a size that covers the
11620                     * whole page for a subsequent hit against a different MPU
11621                     * region or the background region, because it would result
11622                     * in incorrect TLB hits for subsequent accesses to
11623                     * addresses that are in this MPU region.
11624                     */
11625                    if (limit >= base &&
11626                        ranges_overlap(base, limit - base + 1,
11627                                       addr_page_base,
11628                                       TARGET_PAGE_SIZE)) {
11629                        sattrs->subpage = true;
11630                    }
11631                }
11632            }
11633        }
11634        break;
11635    }
11636
11637    /*
11638     * The IDAU will override the SAU lookup results if it specifies
11639     * higher security than the SAU does.
11640     */
11641    if (!idau_ns) {
11642        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
11643            sattrs->ns = false;
11644            sattrs->nsc = idau_nsc;
11645        }
11646    }
11647}
11648
11649bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
11650                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
11651                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
11652                              int *prot, bool *is_subpage,
11653                              ARMMMUFaultInfo *fi, uint32_t *mregion)
11654{
11655    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
11656     * that a full phys-to-virt translation does).
11657     * mregion is (if not NULL) set to the region number which matched,
11658     * or -1 if no region number is returned (MPU off, address did not
11659     * hit a region, address hit in multiple regions).
11660     * We set is_subpage to true if the region hit doesn't cover the
11661     * entire TARGET_PAGE the address is within.
11662     */
11663    ARMCPU *cpu = env_archcpu(env);
11664    bool is_user = regime_is_user(env, mmu_idx);
11665    uint32_t secure = regime_is_secure(env, mmu_idx);
11666    int n;
11667    int matchregion = -1;
11668    bool hit = false;
11669    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
11670    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
11671
11672    *is_subpage = false;
11673    *phys_ptr = address;
11674    *prot = 0;
11675    if (mregion) {
11676        *mregion = -1;
11677    }
11678
11679    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
11680     * was an exception vector read from the vector table (which is always
11681     * done using the default system address map), because those accesses
11682     * are done in arm_v7m_load_vector(), which always does a direct
11683     * read using address_space_ldl(), rather than going via this function.
11684     */
11685    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
11686        hit = true;
11687    } else if (m_is_ppb_region(env, address)) {
11688        hit = true;
11689    } else {
11690        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
11691            hit = true;
11692        }
11693
11694        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
11695            /* region search */
11696            /* Note that the base address is bits [31:5] from the register
11697             * with bits [4:0] all zeroes, but the limit address is bits
11698             * [31:5] from the register with bits [4:0] all ones.
11699             */
11700            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
11701            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
11702
11703            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
11704                /* Region disabled */
11705                continue;
11706            }
11707
11708            if (address < base || address > limit) {
11709                /*
11710                 * Address not in this region. We must check whether the
11711                 * region covers addresses in the same page as our address.
11712                 * In that case we must not report a size that covers the
11713                 * whole page for a subsequent hit against a different MPU
11714                 * region or the background region, because it would result in
11715                 * incorrect TLB hits for subsequent accesses to addresses that
11716                 * are in this MPU region.
11717                 */
11718                if (limit >= base &&
11719                    ranges_overlap(base, limit - base + 1,
11720                                   addr_page_base,
11721                                   TARGET_PAGE_SIZE)) {
11722                    *is_subpage = true;
11723                }
11724                continue;
11725            }
11726
11727            if (base > addr_page_base || limit < addr_page_limit) {
11728                *is_subpage = true;
11729            }
11730
11731            if (matchregion != -1) {
11732                /* Multiple regions match -- always a failure (unlike
11733                 * PMSAv7 where highest-numbered-region wins)
11734                 */
11735                fi->type = ARMFault_Permission;
11736                fi->level = 1;
11737                return true;
11738            }
11739
11740            matchregion = n;
11741            hit = true;
11742        }
11743    }
11744
11745    if (!hit) {
11746        /* background fault */
11747        fi->type = ARMFault_Background;
11748        return true;
11749    }
11750
11751    if (matchregion == -1) {
11752        /* hit using the background region */
11753        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
11754    } else {
11755        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
11756        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
11757
11758        if (m_is_system_region(env, address)) {
11759            /* System space is always execute never */
11760            xn = 1;
11761        }
11762
11763        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
11764        if (*prot && !xn) {
11765            *prot |= PAGE_EXEC;
11766        }
11767        /* We don't need to look the attribute up in the MAIR0/MAIR1
11768         * registers because that only tells us about cacheability.
11769         */
11770        if (mregion) {
11771            *mregion = matchregion;
11772        }
11773    }
11774
11775    fi->type = ARMFault_Permission;
11776    fi->level = 1;
11777    return !(*prot & (1 << access_type));
11778}
11779
11780
11781static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
11782                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11783                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
11784                                 int *prot, target_ulong *page_size,
11785                                 ARMMMUFaultInfo *fi)
11786{
11787    uint32_t secure = regime_is_secure(env, mmu_idx);
11788    V8M_SAttributes sattrs = {};
11789    bool ret;
11790    bool mpu_is_subpage;
11791
11792    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
11793        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
11794        if (access_type == MMU_INST_FETCH) {
11795            /* Instruction fetches always use the MMU bank and the
11796             * transaction attribute determined by the fetch address,
11797             * regardless of CPU state. This is painful for QEMU
11798             * to handle, because it would mean we need to encode
11799             * into the mmu_idx not just the (user, negpri) information
11800             * for the current security state but also that for the
11801             * other security state, which would balloon the number
11802             * of mmu_idx values needed alarmingly.
11803             * Fortunately we can avoid this because it's not actually
11804             * possible to arbitrarily execute code from memory with
11805             * the wrong security attribute: it will always generate
11806             * an exception of some kind or another, apart from the
11807             * special case of an NS CPU executing an SG instruction
11808             * in S&NSC memory. So we always just fail the translation
11809             * here and sort things out in the exception handler
11810             * (including possibly emulating an SG instruction).
11811             */
11812            if (sattrs.ns != !secure) {
11813                if (sattrs.nsc) {
11814                    fi->type = ARMFault_QEMU_NSCExec;
11815                } else {
11816                    fi->type = ARMFault_QEMU_SFault;
11817                }
11818                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
11819                *phys_ptr = address;
11820                *prot = 0;
11821                return true;
11822            }
11823        } else {
11824            /* For data accesses we always use the MMU bank indicated
11825             * by the current CPU state, but the security attributes
11826             * might downgrade a secure access to nonsecure.
11827             */
11828            if (sattrs.ns) {
11829                txattrs->secure = false;
11830            } else if (!secure) {
11831                /* NS access to S memory must fault.
11832                 * Architecturally we should first check whether the
11833                 * MPU information for this address indicates that we
11834                 * are doing an unaligned access to Device memory, which
11835                 * should generate a UsageFault instead. QEMU does not
11836                 * currently check for that kind of unaligned access though.
11837                 * If we added it we would need to do so as a special case
11838                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
11839                 */
11840                fi->type = ARMFault_QEMU_SFault;
11841                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
11842                *phys_ptr = address;
11843                *prot = 0;
11844                return true;
11845            }
11846        }
11847    }
11848
11849    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
11850                            txattrs, prot, &mpu_is_subpage, fi, NULL);
11851    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
11852    return ret;
11853}
11854
11855static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
11856                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
11857                                 hwaddr *phys_ptr, int *prot,
11858                                 ARMMMUFaultInfo *fi)
11859{
11860    int n;
11861    uint32_t mask;
11862    uint32_t base;
11863    bool is_user = regime_is_user(env, mmu_idx);
11864
11865    if (regime_translation_disabled(env, mmu_idx)) {
11866        /* MPU disabled.  */
11867        *phys_ptr = address;
11868        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
11869        return false;
11870    }
11871
11872    *phys_ptr = address;
11873    for (n = 7; n >= 0; n--) {
11874        base = env->cp15.c6_region[n];
11875        if ((base & 1) == 0) {
11876            continue;
11877        }
11878        mask = 1 << ((base >> 1) & 0x1f);
11879        /* Keep this shift separate from the above to avoid an
11880           (undefined) << 32.  */
11881        mask = (mask << 1) - 1;
11882        if (((base ^ address) & ~mask) == 0) {
11883            break;
11884        }
11885    }
11886    if (n < 0) {
11887        fi->type = ARMFault_Background;
11888        return true;
11889    }
11890
11891    if (access_type == MMU_INST_FETCH) {
11892        mask = env->cp15.pmsav5_insn_ap;
11893    } else {
11894        mask = env->cp15.pmsav5_data_ap;
11895    }
11896    mask = (mask >> (n * 4)) & 0xf;
11897    switch (mask) {
11898    case 0:
11899        fi->type = ARMFault_Permission;
11900        fi->level = 1;
11901        return true;
11902    case 1:
11903        if (is_user) {
11904            fi->type = ARMFault_Permission;
11905            fi->level = 1;
11906            return true;
11907        }
11908        *prot = PAGE_READ | PAGE_WRITE;
11909        break;
11910    case 2:
11911        *prot = PAGE_READ;
11912        if (!is_user) {
11913            *prot |= PAGE_WRITE;
11914        }
11915        break;
11916    case 3:
11917        *prot = PAGE_READ | PAGE_WRITE;
11918        break;
11919    case 5:
11920        if (is_user) {
11921            fi->type = ARMFault_Permission;
11922            fi->level = 1;
11923            return true;
11924        }
11925        *prot = PAGE_READ;
11926        break;
11927    case 6:
11928        *prot = PAGE_READ;
11929        break;
11930    default:
11931        /* Bad permission.  */
11932        fi->type = ARMFault_Permission;
11933        fi->level = 1;
11934        return true;
11935    }
11936    *prot |= PAGE_EXEC;
11937    return false;
11938}
11939
11940/* Combine either inner or outer cacheability attributes for normal
11941 * memory, according to table D4-42 and pseudocode procedure
11942 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
11943 *
11944 * NB: only stage 1 includes allocation hints (RW bits), leading to
11945 * some asymmetry.
11946 */
11947static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
11948{
11949    if (s1 == 4 || s2 == 4) {
11950        /* non-cacheable has precedence */
11951        return 4;
11952    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
11953        /* stage 1 write-through takes precedence */
11954        return s1;
11955    } else if (extract32(s2, 2, 2) == 2) {
11956        /* stage 2 write-through takes precedence, but the allocation hint
11957         * is still taken from stage 1
11958         */
11959        return (2 << 2) | extract32(s1, 0, 2);
11960    } else { /* write-back */
11961        return s1;
11962    }
11963}
11964
11965/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
11966 * and CombineS1S2Desc()
11967 *
11968 * @s1:      Attributes from stage 1 walk
11969 * @s2:      Attributes from stage 2 walk
11970 */
11971static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
11972{
11973    uint8_t s1lo, s2lo, s1hi, s2hi;
11974    ARMCacheAttrs ret;
11975    bool tagged = false;
11976
11977    if (s1.attrs == 0xf0) {
11978        tagged = true;
11979        s1.attrs = 0xff;
11980    }
11981
11982    s1lo = extract32(s1.attrs, 0, 4);
11983    s2lo = extract32(s2.attrs, 0, 4);
11984    s1hi = extract32(s1.attrs, 4, 4);
11985    s2hi = extract32(s2.attrs, 4, 4);
11986
11987    /* Combine shareability attributes (table D4-43) */
11988    if (s1.shareability == 2 || s2.shareability == 2) {
11989        /* if either are outer-shareable, the result is outer-shareable */
11990        ret.shareability = 2;
11991    } else if (s1.shareability == 3 || s2.shareability == 3) {
11992        /* if either are inner-shareable, the result is inner-shareable */
11993        ret.shareability = 3;
11994    } else {
11995        /* both non-shareable */
11996        ret.shareability = 0;
11997    }
11998
11999    /* Combine memory type and cacheability attributes */
12000    if (s1hi == 0 || s2hi == 0) {
12001        /* Device has precedence over normal */
12002        if (s1lo == 0 || s2lo == 0) {
12003            /* nGnRnE has precedence over anything */
12004            ret.attrs = 0;
12005        } else if (s1lo == 4 || s2lo == 4) {
12006            /* non-Reordering has precedence over Reordering */
12007            ret.attrs = 4;  /* nGnRE */
12008        } else if (s1lo == 8 || s2lo == 8) {
12009            /* non-Gathering has precedence over Gathering */
12010            ret.attrs = 8;  /* nGRE */
12011        } else {
12012            ret.attrs = 0xc; /* GRE */
12013        }
12014
12015        /* Any location for which the resultant memory type is any
12016         * type of Device memory is always treated as Outer Shareable.
12017         */
12018        ret.shareability = 2;
12019    } else { /* Normal memory */
12020        /* Outer/inner cacheability combine independently */
12021        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
12022                  | combine_cacheattr_nibble(s1lo, s2lo);
12023
12024        if (ret.attrs == 0x44) {
12025            /* Any location for which the resultant memory type is Normal
12026             * Inner Non-cacheable, Outer Non-cacheable is always treated
12027             * as Outer Shareable.
12028             */
12029            ret.shareability = 2;
12030        }
12031    }
12032
12033    /* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
12034    if (tagged && ret.attrs == 0xff) {
12035        ret.attrs = 0xf0;
12036    }
12037
12038    return ret;
12039}
12040
12041
12042/* get_phys_addr - get the physical address for this virtual address
12043 *
12044 * Find the physical address corresponding to the given virtual address,
12045 * by doing a translation table walk on MMU based systems or using the
12046 * MPU state on MPU based systems.
12047 *
12048 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
12049 * prot and page_size may not be filled in, and the populated fsr value provides
12050 * information on why the translation aborted, in the format of a
12051 * DFSR/IFSR fault register, with the following caveats:
12052 *  * we honour the short vs long DFSR format differences.
12053 *  * the WnR bit is never set (the caller must do this).
12054 *  * for PSMAv5 based systems we don't bother to return a full FSR format
12055 *    value.
12056 *
12057 * @env: CPUARMState
12058 * @address: virtual address to get physical address for
12059 * @access_type: 0 for read, 1 for write, 2 for execute
12060 * @mmu_idx: MMU index indicating required translation regime
12061 * @phys_ptr: set to the physical address corresponding to the virtual address
12062 * @attrs: set to the memory transaction attributes to use
12063 * @prot: set to the permissions for the page containing phys_ptr
12064 * @page_size: set to the size of the page containing phys_ptr
12065 * @fi: set to fault info if the translation fails
12066 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
12067 */
12068bool get_phys_addr(CPUARMState *env, target_ulong address,
12069                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
12070                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
12071                   target_ulong *page_size,
12072                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
12073{
12074    if (mmu_idx == ARMMMUIdx_E10_0 ||
12075        mmu_idx == ARMMMUIdx_E10_1 ||
12076        mmu_idx == ARMMMUIdx_E10_1_PAN) {
12077        /* Call ourselves recursively to do the stage 1 and then stage 2
12078         * translations.
12079         */
12080        if (arm_feature(env, ARM_FEATURE_EL2)) {
12081            hwaddr ipa;
12082            int s2_prot;
12083            int ret;
12084            ARMCacheAttrs cacheattrs2 = {};
12085
12086            ret = get_phys_addr(env, address, access_type,
12087                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
12088                                prot, page_size, fi, cacheattrs);
12089
12090            /* If S1 fails or S2 is disabled, return early.  */
12091            if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
12092                *phys_ptr = ipa;
12093                return ret;
12094            }
12095
12096            /* S1 is done. Now do S2 translation.  */
12097            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
12098                                     mmu_idx == ARMMMUIdx_E10_0,
12099                                     phys_ptr, attrs, &s2_prot,
12100                                     page_size, fi, &cacheattrs2);
12101            fi->s2addr = ipa;
12102            /* Combine the S1 and S2 perms.  */
12103            *prot &= s2_prot;
12104
12105            /* If S2 fails, return early.  */
12106            if (ret) {
12107                return ret;
12108            }
12109
12110            /* Combine the S1 and S2 cache attributes. */
12111            if (env->cp15.hcr_el2 & HCR_DC) {
12112                /*
12113                 * HCR.DC forces the first stage attributes to
12114                 *  Normal Non-Shareable,
12115                 *  Inner Write-Back Read-Allocate Write-Allocate,
12116                 *  Outer Write-Back Read-Allocate Write-Allocate.
12117                 * Do not overwrite Tagged within attrs.
12118                 */
12119                if (cacheattrs->attrs != 0xf0) {
12120                    cacheattrs->attrs = 0xff;
12121                }
12122                cacheattrs->shareability = 0;
12123            }
12124            *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
12125            return 0;
12126        } else {
12127            /*
12128             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
12129             */
12130            mmu_idx = stage_1_mmu_idx(mmu_idx);
12131        }
12132    }
12133
12134    /* The page table entries may downgrade secure to non-secure, but
12135     * cannot upgrade an non-secure translation regime's attributes
12136     * to secure.
12137     */
12138    attrs->secure = regime_is_secure(env, mmu_idx);
12139    attrs->user = regime_is_user(env, mmu_idx);
12140
12141    /* Fast Context Switch Extension. This doesn't exist at all in v8.
12142     * In v7 and earlier it affects all stage 1 translations.
12143     */
12144    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
12145        && !arm_feature(env, ARM_FEATURE_V8)) {
12146        if (regime_el(env, mmu_idx) == 3) {
12147            address += env->cp15.fcseidr_s;
12148        } else {
12149            address += env->cp15.fcseidr_ns;
12150        }
12151    }
12152
12153    if (arm_feature(env, ARM_FEATURE_PMSA)) {
12154        bool ret;
12155        *page_size = TARGET_PAGE_SIZE;
12156
12157        if (arm_feature(env, ARM_FEATURE_V8)) {
12158            /* PMSAv8 */
12159            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
12160                                       phys_ptr, attrs, prot, page_size, fi);
12161        } else if (arm_feature(env, ARM_FEATURE_V7)) {
12162            /* PMSAv7 */
12163            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
12164                                       phys_ptr, prot, page_size, fi);
12165        } else {
12166            /* Pre-v7 MPU */
12167            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
12168                                       phys_ptr, prot, fi);
12169        }
12170        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
12171                      " mmu_idx %u -> %s (prot %c%c%c)\n",
12172                      access_type == MMU_DATA_LOAD ? "reading" :
12173                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
12174                      (uint32_t)address, mmu_idx,
12175                      ret ? "Miss" : "Hit",
12176                      *prot & PAGE_READ ? 'r' : '-',
12177                      *prot & PAGE_WRITE ? 'w' : '-',
12178                      *prot & PAGE_EXEC ? 'x' : '-');
12179
12180        return ret;
12181    }
12182
12183    /* Definitely a real MMU, not an MPU */
12184
12185    if (regime_translation_disabled(env, mmu_idx)) {
12186        uint64_t hcr;
12187        uint8_t memattr;
12188
12189        /*
12190         * MMU disabled.  S1 addresses within aa64 translation regimes are
12191         * still checked for bounds -- see AArch64.TranslateAddressS1Off.
12192         */
12193        if (mmu_idx != ARMMMUIdx_Stage2) {
12194            int r_el = regime_el(env, mmu_idx);
12195            if (arm_el_is_aa64(env, r_el)) {
12196                int pamax = arm_pamax(env_archcpu(env));
12197                uint64_t tcr = env->cp15.tcr_el[r_el].raw_tcr;
12198                int addrtop, tbi;
12199
12200                tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
12201                if (access_type == MMU_INST_FETCH) {
12202                    tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
12203                }
12204                tbi = (tbi >> extract64(address, 55, 1)) & 1;
12205                addrtop = (tbi ? 55 : 63);
12206
12207                if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
12208                    fi->type = ARMFault_AddressSize;
12209                    fi->level = 0;
12210                    fi->stage2 = false;
12211                    return 1;
12212                }
12213
12214                /*
12215                 * When TBI is disabled, we've just validated that all of the
12216                 * bits above PAMax are zero, so logically we only need to
12217                 * clear the top byte for TBI.  But it's clearer to follow
12218                 * the pseudocode set of addrdesc.paddress.
12219                 */
12220                address = extract64(address, 0, 52);
12221            }
12222        }
12223        *phys_ptr = address;
12224        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
12225        *page_size = TARGET_PAGE_SIZE;
12226
12227        /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
12228        hcr = arm_hcr_el2_eff(env);
12229        cacheattrs->shareability = 0;
12230        if (hcr & HCR_DC) {
12231            if (hcr & HCR_DCT) {
12232                memattr = 0xf0;  /* Tagged, Normal, WB, RWA */
12233            } else {
12234                memattr = 0xff;  /* Normal, WB, RWA */
12235            }
12236        } else if (access_type == MMU_INST_FETCH) {
12237            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
12238                memattr = 0xee;  /* Normal, WT, RA, NT */
12239            } else {
12240                memattr = 0x44;  /* Normal, NC, No */
12241            }
12242            cacheattrs->shareability = 2; /* outer sharable */
12243        } else {
12244            memattr = 0x00;      /* Device, nGnRnE */
12245        }
12246        cacheattrs->attrs = memattr;
12247        return 0;
12248    }
12249
12250    if (regime_using_lpae_format(env, mmu_idx)) {
12251        return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
12252                                  phys_ptr, attrs, prot, page_size,
12253                                  fi, cacheattrs);
12254    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
12255        return get_phys_addr_v6(env, address, access_type, mmu_idx,
12256                                phys_ptr, attrs, prot, page_size, fi);
12257    } else {
12258        return get_phys_addr_v5(env, address, access_type, mmu_idx,
12259                                    phys_ptr, prot, page_size, fi);
12260    }
12261}
12262
12263hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
12264                                         MemTxAttrs *attrs)
12265{
12266    ARMCPU *cpu = ARM_CPU(cs);
12267    CPUARMState *env = &cpu->env;
12268    hwaddr phys_addr;
12269    target_ulong page_size;
12270    int prot;
12271    bool ret;
12272    ARMMMUFaultInfo fi = {};
12273    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
12274    ARMCacheAttrs cacheattrs = {};
12275
12276    *attrs = (MemTxAttrs) {};
12277
12278    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
12279                        attrs, &prot, &page_size, &fi, &cacheattrs);
12280
12281    if (ret) {
12282        return -1;
12283    }
12284    return phys_addr;
12285}
12286
12287#endif
12288
12289/* Note that signed overflow is undefined in C.  The following routines are
12290   careful to use unsigned types where modulo arithmetic is required.
12291   Failure to do so _will_ break on newer gcc.  */
12292
12293/* Signed saturating arithmetic.  */
12294
12295/* Perform 16-bit signed saturating addition.  */
12296static inline uint16_t add16_sat(uint16_t a, uint16_t b)
12297{
12298    uint16_t res;
12299
12300    res = a + b;
12301    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
12302        if (a & 0x8000)
12303            res = 0x8000;
12304        else
12305            res = 0x7fff;
12306    }
12307    return res;
12308}
12309
12310/* Perform 8-bit signed saturating addition.  */
12311static inline uint8_t add8_sat(uint8_t a, uint8_t b)
12312{
12313    uint8_t res;
12314
12315    res = a + b;
12316    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
12317        if (a & 0x80)
12318            res = 0x80;
12319        else
12320            res = 0x7f;
12321    }
12322    return res;
12323}
12324
12325/* Perform 16-bit signed saturating subtraction.  */
12326static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
12327{
12328    uint16_t res;
12329
12330    res = a - b;
12331    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
12332        if (a & 0x8000)
12333            res = 0x8000;
12334        else
12335            res = 0x7fff;
12336    }
12337    return res;
12338}
12339
12340/* Perform 8-bit signed saturating subtraction.  */
12341static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
12342{
12343    uint8_t res;
12344
12345    res = a - b;
12346    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
12347        if (a & 0x80)
12348            res = 0x80;
12349        else
12350            res = 0x7f;
12351    }
12352    return res;
12353}
12354
12355#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
12356#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
12357#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
12358#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
12359#define PFX q
12360
12361#include "op_addsub.h"
12362
12363/* Unsigned saturating arithmetic.  */
12364static inline uint16_t add16_usat(uint16_t a, uint16_t b)
12365{
12366    uint16_t res;
12367    res = a + b;
12368    if (res < a)
12369        res = 0xffff;
12370    return res;
12371}
12372
12373static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
12374{
12375    if (a > b)
12376        return a - b;
12377    else
12378        return 0;
12379}
12380
12381static inline uint8_t add8_usat(uint8_t a, uint8_t b)
12382{
12383    uint8_t res;
12384    res = a + b;
12385    if (res < a)
12386        res = 0xff;
12387    return res;
12388}
12389
12390static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
12391{
12392    if (a > b)
12393        return a - b;
12394    else
12395        return 0;
12396}
12397
12398#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
12399#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
12400#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
12401#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
12402#define PFX uq
12403
12404#include "op_addsub.h"
12405
12406/* Signed modulo arithmetic.  */
12407#define SARITH16(a, b, n, op) do { \
12408    int32_t sum; \
12409    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
12410    RESULT(sum, n, 16); \
12411    if (sum >= 0) \
12412        ge |= 3 << (n * 2); \
12413    } while(0)
12414
12415#define SARITH8(a, b, n, op) do { \
12416    int32_t sum; \
12417    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
12418    RESULT(sum, n, 8); \
12419    if (sum >= 0) \
12420        ge |= 1 << n; \
12421    } while(0)
12422
12423
12424#define ADD16(a, b, n) SARITH16(a, b, n, +)
12425#define SUB16(a, b, n) SARITH16(a, b, n, -)
12426#define ADD8(a, b, n)  SARITH8(a, b, n, +)
12427#define SUB8(a, b, n)  SARITH8(a, b, n, -)
12428#define PFX s
12429#define ARITH_GE
12430
12431#include "op_addsub.h"
12432
12433/* Unsigned modulo arithmetic.  */
12434#define ADD16(a, b, n) do { \
12435    uint32_t sum; \
12436    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
12437    RESULT(sum, n, 16); \
12438    if ((sum >> 16) == 1) \
12439        ge |= 3 << (n * 2); \
12440    } while(0)
12441
12442#define ADD8(a, b, n) do { \
12443    uint32_t sum; \
12444    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
12445    RESULT(sum, n, 8); \
12446    if ((sum >> 8) == 1) \
12447        ge |= 1 << n; \
12448    } while(0)
12449
12450#define SUB16(a, b, n) do { \
12451    uint32_t sum; \
12452    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
12453    RESULT(sum, n, 16); \
12454    if ((sum >> 16) == 0) \
12455        ge |= 3 << (n * 2); \
12456    } while(0)
12457
12458#define SUB8(a, b, n) do { \
12459    uint32_t sum; \
12460    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
12461    RESULT(sum, n, 8); \
12462    if ((sum >> 8) == 0) \
12463        ge |= 1 << n; \
12464    } while(0)
12465
12466#define PFX u
12467#define ARITH_GE
12468
12469#include "op_addsub.h"
12470
12471/* Halved signed arithmetic.  */
12472#define ADD16(a, b, n) \
12473  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
12474#define SUB16(a, b, n) \
12475  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
12476#define ADD8(a, b, n) \
12477  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
12478#define SUB8(a, b, n) \
12479  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
12480#define PFX sh
12481
12482#include "op_addsub.h"
12483
12484/* Halved unsigned arithmetic.  */
12485#define ADD16(a, b, n) \
12486  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12487#define SUB16(a, b, n) \
12488  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
12489#define ADD8(a, b, n) \
12490  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12491#define SUB8(a, b, n) \
12492  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
12493#define PFX uh
12494
12495#include "op_addsub.h"
12496
12497static inline uint8_t do_usad(uint8_t a, uint8_t b)
12498{
12499    if (a > b)
12500        return a - b;
12501    else
12502        return b - a;
12503}
12504
12505/* Unsigned sum of absolute byte differences.  */
12506uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
12507{
12508    uint32_t sum;
12509    sum = do_usad(a, b);
12510    sum += do_usad(a >> 8, b >> 8);
12511    sum += do_usad(a >> 16, b >> 16);
12512    sum += do_usad(a >> 24, b >> 24);
12513    return sum;
12514}
12515
12516/* For ARMv6 SEL instruction.  */
12517uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
12518{
12519    uint32_t mask;
12520
12521    mask = 0;
12522    if (flags & 1)
12523        mask |= 0xff;
12524    if (flags & 2)
12525        mask |= 0xff00;
12526    if (flags & 4)
12527        mask |= 0xff0000;
12528    if (flags & 8)
12529        mask |= 0xff000000;
12530    return (a & mask) | (b & ~mask);
12531}
12532
12533/* CRC helpers.
12534 * The upper bytes of val (above the number specified by 'bytes') must have
12535 * been zeroed out by the caller.
12536 */
12537uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
12538{
12539    uint8_t buf[4];
12540
12541    stl_le_p(buf, val);
12542
12543    /* zlib crc32 converts the accumulator and output to one's complement.  */
12544    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
12545}
12546
12547uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
12548{
12549    uint8_t buf[4];
12550
12551    stl_le_p(buf, val);
12552
12553    /* Linux crc32c converts the output to one's complement.  */
12554    return crc32c(acc, buf, bytes) ^ 0xffffffff;
12555}
12556
12557/* Return the exception level to which FP-disabled exceptions should
12558 * be taken, or 0 if FP is enabled.
12559 */
12560int fp_exception_el(CPUARMState *env, int cur_el)
12561{
12562#ifndef CONFIG_USER_ONLY
12563    /* CPACR and the CPTR registers don't exist before v6, so FP is
12564     * always accessible
12565     */
12566    if (!arm_feature(env, ARM_FEATURE_V6)) {
12567        return 0;
12568    }
12569
12570    if (arm_feature(env, ARM_FEATURE_M)) {
12571        /* CPACR can cause a NOCP UsageFault taken to current security state */
12572        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
12573            return 1;
12574        }
12575
12576        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
12577            if (!extract32(env->v7m.nsacr, 10, 1)) {
12578                /* FP insns cause a NOCP UsageFault taken to Secure */
12579                return 3;
12580            }
12581        }
12582
12583        return 0;
12584    }
12585
12586    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
12587     * 0, 2 : trap EL0 and EL1/PL1 accesses
12588     * 1    : trap only EL0 accesses
12589     * 3    : trap no accesses
12590     * This register is ignored if E2H+TGE are both set.
12591     */
12592    if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
12593        int fpen = extract32(env->cp15.cpacr_el1, 20, 2);
12594
12595        switch (fpen) {
12596        case 0:
12597        case 2:
12598            if (cur_el == 0 || cur_el == 1) {
12599                /* Trap to PL1, which might be EL1 or EL3 */
12600                if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
12601                    return 3;
12602                }
12603                return 1;
12604            }
12605            if (cur_el == 3 && !is_a64(env)) {
12606                /* Secure PL1 running at EL3 */
12607                return 3;
12608            }
12609            break;
12610        case 1:
12611            if (cur_el == 0) {
12612                return 1;
12613            }
12614            break;
12615        case 3:
12616            break;
12617        }
12618    }
12619
12620    /*
12621     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
12622     * to control non-secure access to the FPU. It doesn't have any
12623     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
12624     */
12625    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
12626         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
12627        if (!extract32(env->cp15.nsacr, 10, 1)) {
12628            /* FP insns act as UNDEF */
12629            return cur_el == 2 ? 2 : 1;
12630        }
12631    }
12632
12633    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
12634     * check because zero bits in the registers mean "don't trap".
12635     */
12636
12637    /* CPTR_EL2 : present in v7VE or v8 */
12638    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
12639        && !arm_is_secure_below_el3(env)) {
12640        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
12641        return 2;
12642    }
12643
12644    /* CPTR_EL3 : present in v8 */
12645    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
12646        /* Trap all FP ops to EL3 */
12647        return 3;
12648    }
12649#endif
12650    return 0;
12651}
12652
12653/* Return the exception level we're running at if this is our mmu_idx */
12654int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
12655{
12656    if (mmu_idx & ARM_MMU_IDX_M) {
12657        return mmu_idx & ARM_MMU_IDX_M_PRIV;
12658    }
12659
12660    switch (mmu_idx) {
12661    case ARMMMUIdx_E10_0:
12662    case ARMMMUIdx_E20_0:
12663    case ARMMMUIdx_SE10_0:
12664        return 0;
12665    case ARMMMUIdx_E10_1:
12666    case ARMMMUIdx_E10_1_PAN:
12667    case ARMMMUIdx_SE10_1:
12668    case ARMMMUIdx_SE10_1_PAN:
12669        return 1;
12670    case ARMMMUIdx_E2:
12671    case ARMMMUIdx_E20_2:
12672    case ARMMMUIdx_E20_2_PAN:
12673        return 2;
12674    case ARMMMUIdx_SE3:
12675        return 3;
12676    default:
12677        g_assert_not_reached();
12678    }
12679}
12680
12681#ifndef CONFIG_TCG
12682ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
12683{
12684    g_assert_not_reached();
12685}
12686#endif
12687
12688ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
12689{
12690    if (arm_feature(env, ARM_FEATURE_M)) {
12691        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
12692    }
12693
12694    /* See ARM pseudo-function ELIsInHost.  */
12695    switch (el) {
12696    case 0:
12697        if (arm_is_secure_below_el3(env)) {
12698            return ARMMMUIdx_SE10_0;
12699        }
12700        if ((env->cp15.hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)
12701            && arm_el_is_aa64(env, 2)) {
12702            return ARMMMUIdx_E20_0;
12703        }
12704        return ARMMMUIdx_E10_0;
12705    case 1:
12706        if (arm_is_secure_below_el3(env)) {
12707            if (env->pstate & PSTATE_PAN) {
12708                return ARMMMUIdx_SE10_1_PAN;
12709            }
12710            return ARMMMUIdx_SE10_1;
12711        }
12712        if (env->pstate & PSTATE_PAN) {
12713            return ARMMMUIdx_E10_1_PAN;
12714        }
12715        return ARMMMUIdx_E10_1;
12716    case 2:
12717        /* TODO: ARMv8.4-SecEL2 */
12718        /* Note that TGE does not apply at EL2.  */
12719        if ((env->cp15.hcr_el2 & HCR_E2H) && arm_el_is_aa64(env, 2)) {
12720            if (env->pstate & PSTATE_PAN) {
12721                return ARMMMUIdx_E20_2_PAN;
12722            }
12723            return ARMMMUIdx_E20_2;
12724        }
12725        return ARMMMUIdx_E2;
12726    case 3:
12727        return ARMMMUIdx_SE3;
12728    default:
12729        g_assert_not_reached();
12730    }
12731}
12732
12733ARMMMUIdx arm_mmu_idx(CPUARMState *env)
12734{
12735    return arm_mmu_idx_el(env, arm_current_el(env));
12736}
12737
12738#ifndef CONFIG_USER_ONLY
12739ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
12740{
12741    return stage_1_mmu_idx(arm_mmu_idx(env));
12742}
12743#endif
12744
12745static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
12746                                      ARMMMUIdx mmu_idx, uint32_t flags)
12747{
12748    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
12749    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
12750                       arm_to_core_mmu_idx(mmu_idx));
12751
12752    if (arm_singlestep_active(env)) {
12753        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
12754    }
12755    return flags;
12756}
12757
12758static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
12759                                         ARMMMUIdx mmu_idx, uint32_t flags)
12760{
12761    bool sctlr_b = arm_sctlr_b(env);
12762
12763    if (sctlr_b) {
12764        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
12765    }
12766    if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
12767        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
12768    }
12769    flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
12770
12771    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
12772}
12773
12774static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
12775                                   ARMMMUIdx mmu_idx)
12776{
12777    uint32_t flags = 0;
12778
12779    if (arm_v7m_is_handler_mode(env)) {
12780        flags = FIELD_DP32(flags, TBFLAG_M32, HANDLER, 1);
12781    }
12782
12783    /*
12784     * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
12785     * is suppressing them because the requested execution priority
12786     * is less than 0.
12787     */
12788    if (arm_feature(env, ARM_FEATURE_V8) &&
12789        !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
12790          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
12791        flags = FIELD_DP32(flags, TBFLAG_M32, STACKCHECK, 1);
12792    }
12793
12794    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
12795}
12796
12797static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
12798{
12799    int flags = 0;
12800
12801    flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
12802                       arm_debug_target_el(env));
12803    return flags;
12804}
12805
12806static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
12807                                   ARMMMUIdx mmu_idx)
12808{
12809    uint32_t flags = rebuild_hflags_aprofile(env);
12810
12811    if (arm_el_is_aa64(env, 1)) {
12812        flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
12813    }
12814
12815    if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
12816        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
12817        flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
12818    }
12819
12820    return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
12821}
12822
12823static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
12824                                   ARMMMUIdx mmu_idx)
12825{
12826    uint32_t flags = rebuild_hflags_aprofile(env);
12827    ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
12828    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
12829    uint64_t sctlr;
12830    int tbii, tbid;
12831
12832    flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
12833
12834    /* Get control bits for tagged addresses.  */
12835    tbid = aa64_va_parameter_tbi(tcr, mmu_idx);
12836    tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx);
12837
12838    flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
12839    flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
12840
12841    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
12842        int sve_el = sve_exception_el(env, el);
12843        uint32_t zcr_len;
12844
12845        /*
12846         * If SVE is disabled, but FP is enabled,
12847         * then the effective len is 0.
12848         */
12849        if (sve_el != 0 && fp_el == 0) {
12850            zcr_len = 0;
12851        } else {
12852            zcr_len = sve_zcr_len_for_el(env, el);
12853        }
12854        flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
12855        flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
12856    }
12857
12858    sctlr = regime_sctlr(env, stage1);
12859
12860    if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
12861        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
12862    }
12863
12864    if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
12865        /*
12866         * In order to save space in flags, we record only whether
12867         * pauth is "inactive", meaning all insns are implemented as
12868         * a nop, or "active" when some action must be performed.
12869         * The decision of which action to take is left to a helper.
12870         */
12871        if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
12872            flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
12873        }
12874    }
12875
12876    if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
12877        /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
12878        if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
12879            flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
12880        }
12881    }
12882
12883    /* Compute the condition for using AccType_UNPRIV for LDTR et al. */
12884    if (!(env->pstate & PSTATE_UAO)) {
12885        switch (mmu_idx) {
12886        case ARMMMUIdx_E10_1:
12887        case ARMMMUIdx_E10_1_PAN:
12888        case ARMMMUIdx_SE10_1:
12889        case ARMMMUIdx_SE10_1_PAN:
12890            /* TODO: ARMv8.3-NV */
12891            flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
12892            break;
12893        case ARMMMUIdx_E20_2:
12894        case ARMMMUIdx_E20_2_PAN:
12895            /* TODO: ARMv8.4-SecEL2 */
12896            /*
12897             * Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
12898             * gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
12899             */
12900            if (env->cp15.hcr_el2 & HCR_TGE) {
12901                flags = FIELD_DP32(flags, TBFLAG_A64, UNPRIV, 1);
12902            }
12903            break;
12904        default:
12905            break;
12906        }
12907    }
12908
12909    if (cpu_isar_feature(aa64_mte, env_archcpu(env))) {
12910        /*
12911         * Set MTE_ACTIVE if any access may be Checked, and leave clear
12912         * if all accesses must be Unchecked:
12913         * 1) If no TBI, then there are no tags in the address to check,
12914         * 2) If Tag Check Override, then all accesses are Unchecked,
12915         * 3) If Tag Check Fail == 0, then Checked access have no effect,
12916         * 4) If no Allocation Tag Access, then all accesses are Unchecked.
12917         */
12918        if (allocation_tag_access_enabled(env, el, sctlr)) {
12919            flags = FIELD_DP32(flags, TBFLAG_A64, ATA, 1);
12920            if (tbid
12921                && !(env->pstate & PSTATE_TCO)
12922                && (sctlr & (el == 0 ? SCTLR_TCF0 : SCTLR_TCF))) {
12923                flags = FIELD_DP32(flags, TBFLAG_A64, MTE_ACTIVE, 1);
12924            }
12925        }
12926        /* And again for unprivileged accesses, if required.  */
12927        if (FIELD_EX32(flags, TBFLAG_A64, UNPRIV)
12928            && tbid
12929            && !(env->pstate & PSTATE_TCO)
12930            && (sctlr & SCTLR_TCF0)
12931            && allocation_tag_access_enabled(env, 0, sctlr)) {
12932            flags = FIELD_DP32(flags, TBFLAG_A64, MTE0_ACTIVE, 1);
12933        }
12934        /* Cache TCMA as well as TBI. */
12935        flags = FIELD_DP32(flags, TBFLAG_A64, TCMA,
12936                           aa64_va_parameter_tcma(tcr, mmu_idx));
12937    }
12938
12939    return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
12940}
12941
12942static uint32_t rebuild_hflags_internal(CPUARMState *env)
12943{
12944    int el = arm_current_el(env);
12945    int fp_el = fp_exception_el(env, el);
12946    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
12947
12948    if (is_a64(env)) {
12949        return rebuild_hflags_a64(env, el, fp_el, mmu_idx);
12950    } else if (arm_feature(env, ARM_FEATURE_M)) {
12951        return rebuild_hflags_m32(env, fp_el, mmu_idx);
12952    } else {
12953        return rebuild_hflags_a32(env, fp_el, mmu_idx);
12954    }
12955}
12956
12957void arm_rebuild_hflags(CPUARMState *env)
12958{
12959    env->hflags = rebuild_hflags_internal(env);
12960}
12961
12962/*
12963 * If we have triggered a EL state change we can't rely on the
12964 * translator having passed it to us, we need to recompute.
12965 */
12966void HELPER(rebuild_hflags_m32_newel)(CPUARMState *env)
12967{
12968    int el = arm_current_el(env);
12969    int fp_el = fp_exception_el(env, el);
12970    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
12971    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
12972}
12973
12974void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
12975{
12976    int fp_el = fp_exception_el(env, el);
12977    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
12978
12979    env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
12980}
12981
12982/*
12983 * If we have triggered a EL state change we can't rely on the
12984 * translator having passed it to us, we need to recompute.
12985 */
12986void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
12987{
12988    int el = arm_current_el(env);
12989    int fp_el = fp_exception_el(env, el);
12990    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
12991    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
12992}
12993
12994void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
12995{
12996    int fp_el = fp_exception_el(env, el);
12997    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
12998
12999    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
13000}
13001
13002void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
13003{
13004    int fp_el = fp_exception_el(env, el);
13005    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
13006
13007    env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
13008}
13009
13010static inline void assert_hflags_rebuild_correctly(CPUARMState *env)
13011{
13012#ifdef CONFIG_DEBUG_TCG
13013    uint32_t env_flags_current = env->hflags;
13014    uint32_t env_flags_rebuilt = rebuild_hflags_internal(env);
13015
13016    if (unlikely(env_flags_current != env_flags_rebuilt)) {
13017        fprintf(stderr, "TCG hflags mismatch (current:0x%08x rebuilt:0x%08x)\n",
13018                env_flags_current, env_flags_rebuilt);
13019        abort();
13020    }
13021#endif
13022}
13023
13024void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
13025                          target_ulong *cs_base, uint32_t *pflags)
13026{
13027    uint32_t flags = env->hflags;
13028    uint32_t pstate_for_ss;
13029
13030    *cs_base = 0;
13031    assert_hflags_rebuild_correctly(env);
13032
13033    if (FIELD_EX32(flags, TBFLAG_ANY, AARCH64_STATE)) {
13034        *pc = env->pc;
13035        if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
13036            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
13037        }
13038        pstate_for_ss = env->pstate;
13039    } else {
13040        *pc = env->regs[15];
13041
13042        if (arm_feature(env, ARM_FEATURE_M)) {
13043            if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
13044                FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
13045                != env->v7m.secure) {
13046                flags = FIELD_DP32(flags, TBFLAG_M32, FPCCR_S_WRONG, 1);
13047            }
13048
13049            if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
13050                (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
13051                 (env->v7m.secure &&
13052                  !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
13053                /*
13054                 * ASPEN is set, but FPCA/SFPA indicate that there is no
13055                 * active FP context; we must create a new FP context before
13056                 * executing any FP insn.
13057                 */
13058                flags = FIELD_DP32(flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED, 1);
13059            }
13060
13061            bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
13062            if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
13063                flags = FIELD_DP32(flags, TBFLAG_M32, LSPACT, 1);
13064            }
13065        } else {
13066            /*
13067             * Note that XSCALE_CPAR shares bits with VECSTRIDE.
13068             * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
13069             */
13070            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
13071                flags = FIELD_DP32(flags, TBFLAG_A32,
13072                                   XSCALE_CPAR, env->cp15.c15_cpar);
13073            } else {
13074                flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN,
13075                                   env->vfp.vec_len);
13076                flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE,
13077                                   env->vfp.vec_stride);
13078            }
13079            if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
13080                flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
13081            }
13082        }
13083
13084        flags = FIELD_DP32(flags, TBFLAG_AM32, THUMB, env->thumb);
13085        flags = FIELD_DP32(flags, TBFLAG_AM32, CONDEXEC, env->condexec_bits);
13086        pstate_for_ss = env->uncached_cpsr;
13087    }
13088
13089    /*
13090     * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
13091     * states defined in the ARM ARM for software singlestep:
13092     *  SS_ACTIVE   PSTATE.SS   State
13093     *     0            x       Inactive (the TB flag for SS is always 0)
13094     *     1            0       Active-pending
13095     *     1            1       Active-not-pending
13096     * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
13097     */
13098    if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE) &&
13099        (pstate_for_ss & PSTATE_SS)) {
13100        flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
13101    }
13102
13103    *pflags = flags;
13104}
13105
13106#ifdef TARGET_AARCH64
13107/*
13108 * The manual says that when SVE is enabled and VQ is widened the
13109 * implementation is allowed to zero the previously inaccessible
13110 * portion of the registers.  The corollary to that is that when
13111 * SVE is enabled and VQ is narrowed we are also allowed to zero
13112 * the now inaccessible portion of the registers.
13113 *
13114 * The intent of this is that no predicate bit beyond VQ is ever set.
13115 * Which means that some operations on predicate registers themselves
13116 * may operate on full uint64_t or even unrolled across the maximum
13117 * uint64_t[4].  Performing 4 bits of host arithmetic unconditionally
13118 * may well be cheaper than conditionals to restrict the operation
13119 * to the relevant portion of a uint16_t[16].
13120 */
13121void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
13122{
13123    int i, j;
13124    uint64_t pmask;
13125
13126    assert(vq >= 1 && vq <= ARM_MAX_VQ);
13127    assert(vq <= env_archcpu(env)->sve_max_vq);
13128
13129    /* Zap the high bits of the zregs.  */
13130    for (i = 0; i < 32; i++) {
13131        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
13132    }
13133
13134    /* Zap the high bits of the pregs and ffr.  */
13135    pmask = 0;
13136    if (vq & 3) {
13137        pmask = ~(-1ULL << (16 * (vq & 3)));
13138    }
13139    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
13140        for (i = 0; i < 17; ++i) {
13141            env->vfp.pregs[i].p[j] &= pmask;
13142        }
13143        pmask = 0;
13144    }
13145}
13146
13147/*
13148 * Notice a change in SVE vector size when changing EL.
13149 */
13150void aarch64_sve_change_el(CPUARMState *env, int old_el,
13151                           int new_el, bool el0_a64)
13152{
13153    ARMCPU *cpu = env_archcpu(env);
13154    int old_len, new_len;
13155    bool old_a64, new_a64;
13156
13157    /* Nothing to do if no SVE.  */
13158    if (!cpu_isar_feature(aa64_sve, cpu)) {
13159        return;
13160    }
13161
13162    /* Nothing to do if FP is disabled in either EL.  */
13163    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
13164        return;
13165    }
13166
13167    /*
13168     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
13169     * at ELx, or not available because the EL is in AArch32 state, then
13170     * for all purposes other than a direct read, the ZCR_ELx.LEN field
13171     * has an effective value of 0".
13172     *
13173     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
13174     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
13175     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
13176     * we already have the correct register contents when encountering the
13177     * vq0->vq0 transition between EL0->EL1.
13178     */
13179    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
13180    old_len = (old_a64 && !sve_exception_el(env, old_el)
13181               ? sve_zcr_len_for_el(env, old_el) : 0);
13182    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
13183    new_len = (new_a64 && !sve_exception_el(env, new_el)
13184               ? sve_zcr_len_for_el(env, new_el) : 0);
13185
13186    /* When changing vector length, clear inaccessible state.  */
13187    if (new_len < old_len) {
13188        aarch64_sve_narrow_vq(env, new_len + 1);
13189    }
13190}
13191#endif
13192