qemu/target/arm/helper.c
   1/*
   2 * ARM generic helpers.
   3 *
   4 * This code is licensed under the GNU GPL v2 or later.
   5 *
   6 * SPDX-License-Identifier: GPL-2.0-or-later
   7 */
   8#include "qemu/osdep.h"
   9#include "qemu/units.h"
  10#include "target/arm/idau.h"
  11#include "trace.h"
  12#include "cpu.h"
  13#include "internals.h"
  14#include "exec/gdbstub.h"
  15#include "exec/helper-proto.h"
  16#include "qemu/host-utils.h"
  17#include "sysemu/sysemu.h"
  18#include "qemu/bitops.h"
  19#include "qemu/crc32c.h"
  20#include "qemu/qemu-print.h"
  21#include "exec/exec-all.h"
  22#include <zlib.h> /* For crc32 */
  23#include "hw/semihosting/semihost.h"
  24#include "sysemu/cpus.h"
  25#include "sysemu/kvm.h"
  26#include "qemu/range.h"
  27#include "qapi/qapi-commands-machine-target.h"
  28#include "qapi/error.h"
  29#include "qemu/guest-random.h"
  30#ifdef CONFIG_TCG
  31#include "arm_ldst.h"
  32#include "exec/cpu_ldst.h"
  33#endif
  34
  35#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
  36
  37#ifndef CONFIG_USER_ONLY
  38
  39static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
  40                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
  41                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
  42                               target_ulong *page_size_ptr,
  43                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
  44#endif
  45
  46static void switch_mode(CPUARMState *env, int mode);
  47
  48static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
  49{
  50    int nregs;
  51
  52    /* VFP data registers are always little-endian.  */
  53    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
  54    if (reg < nregs) {
  55        stq_le_p(buf, *aa32_vfp_dreg(env, reg));
  56        return 8;
  57    }
  58    if (arm_feature(env, ARM_FEATURE_NEON)) {
  59        /* Aliases for Q regs.  */
  60        nregs += 16;
  61        if (reg < nregs) {
  62            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
  63            stq_le_p(buf, q[0]);
  64            stq_le_p(buf + 8, q[1]);
  65            return 16;
  66        }
  67    }
  68    switch (reg - nregs) {
  69    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
  70    case 1: stl_p(buf, vfp_get_fpscr(env)); return 4;
  71    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
  72    }
  73    return 0;
  74}
  75
  76static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
  77{
  78    int nregs;
  79
  80    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
  81    if (reg < nregs) {
  82        *aa32_vfp_dreg(env, reg) = ldq_le_p(buf);
  83        return 8;
  84    }
  85    if (arm_feature(env, ARM_FEATURE_NEON)) {
  86        nregs += 16;
  87        if (reg < nregs) {
  88            uint64_t *q = aa32_vfp_qreg(env, reg - 32);
  89            q[0] = ldq_le_p(buf);
  90            q[1] = ldq_le_p(buf + 8);
  91            return 16;
  92        }
  93    }
  94    switch (reg - nregs) {
  95    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
  96    case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4;
  97    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
  98    }
  99    return 0;
 100}
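/*
 * Illustrative note (added for clarity, not part of the original source):
 * for a VFPv3 + NEON core, the two AArch32 helpers above see the gdbstub
 * register numbers roughly as
 *
 *     reg  0..31  -> D0..D31   (8 bytes each)
 *     reg 32..47  -> Q0..Q15   (16 bytes each, aliasing the D-register pairs)
 *     reg 48..50  -> FPSID, FPSCR, FPEXC (4 bytes each)
 *
 * On a core with only 16 D registers the FPSID/FPSCR/FPEXC numbers shift
 * down accordingly; the exact layout is defined by the XML description,
 * this is only a sketch of the numbering assumed here.
 */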
 101
 102static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
 103{
 104    switch (reg) {
 105    case 0 ... 31:
 106        /* 128 bit FP register */
 107        {
 108            uint64_t *q = aa64_vfp_qreg(env, reg);
 109            stq_le_p(buf, q[0]);
 110            stq_le_p(buf + 8, q[1]);
 111            return 16;
 112        }
 113    case 32:
 114        /* FPSR */
 115        stl_p(buf, vfp_get_fpsr(env));
 116        return 4;
 117    case 33:
 118        /* FPCR */
 119        stl_p(buf, vfp_get_fpcr(env));
 120        return 4;
 121    default:
 122        return 0;
 123    }
 124}
 125
 126static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
 127{
 128    switch (reg) {
 129    case 0 ... 31:
 130        /* 128 bit FP register */
 131        {
 132            uint64_t *q = aa64_vfp_qreg(env, reg);
 133            q[0] = ldq_le_p(buf);
 134            q[1] = ldq_le_p(buf + 8);
 135            return 16;
 136        }
 137    case 32:
 138        /* FPSR */
 139        vfp_set_fpsr(env, ldl_p(buf));
 140        return 4;
 141    case 33:
 142        /* FPCR */
 143        vfp_set_fpcr(env, ldl_p(buf));
 144        return 4;
 145    default:
 146        return 0;
 147    }
 148}
 149
 150static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
 151{
 152    assert(ri->fieldoffset);
 153    if (cpreg_field_is_64bit(ri)) {
 154        return CPREG_FIELD64(env, ri);
 155    } else {
 156        return CPREG_FIELD32(env, ri);
 157    }
 158}
 159
 160static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
 161                      uint64_t value)
 162{
 163    assert(ri->fieldoffset);
 164    if (cpreg_field_is_64bit(ri)) {
 165        CPREG_FIELD64(env, ri) = value;
 166    } else {
 167        CPREG_FIELD32(env, ri) = value;
 168    }
 169}
 170
 171static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
 172{
 173    return (char *)env + ri->fieldoffset;
 174}
 175
 176uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
 177{
 178    /* Raw read of a coprocessor register (as needed for migration, etc). */
 179    if (ri->type & ARM_CP_CONST) {
 180        return ri->resetvalue;
 181    } else if (ri->raw_readfn) {
 182        return ri->raw_readfn(env, ri);
 183    } else if (ri->readfn) {
 184        return ri->readfn(env, ri);
 185    } else {
 186        return raw_read(env, ri);
 187    }
 188}
 189
 190static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
 191                             uint64_t v)
 192{
 193    /* Raw write of a coprocessor register (as needed for migration, etc).
 194     * Note that constant registers are treated as write-ignored; the
 195     * caller should check for success by whether a readback gives the
 196     * value written.
 197     */
 198    if (ri->type & ARM_CP_CONST) {
 199        return;
 200    } else if (ri->raw_writefn) {
 201        ri->raw_writefn(env, ri, v);
 202    } else if (ri->writefn) {
 203        ri->writefn(env, ri, v);
 204    } else {
 205        raw_write(env, ri, v);
 206    }
 207}
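/*
 * Sketch (added for clarity): because constant registers are write-ignored,
 * a caller that needs to know whether a raw write "took" follows it with a
 * readback, roughly as write_list_to_cpustate() below does:
 *
 *     write_raw_cp_reg(&cpu->env, ri, v);
 *     if (read_raw_cp_reg(&cpu->env, ri) != v) {
 *         ... value was not accepted (constant or partially read-only) ...
 *     }
 */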
 208
 209static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg)
 210{
 211    ARMCPU *cpu = env_archcpu(env);
 212    const ARMCPRegInfo *ri;
 213    uint32_t key;
 214
 215    key = cpu->dyn_xml.cpregs_keys[reg];
 216    ri = get_arm_cp_reginfo(cpu->cp_regs, key);
 217    if (ri) {
 218        if (cpreg_field_is_64bit(ri)) {
 219            return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri));
 220        } else {
 221            return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri));
 222        }
 223    }
 224    return 0;
 225}
 226
 227static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg)
 228{
 229    return 0;
 230}
 231
 232static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
 233{
 234   /* Return true if the regdef would cause an assertion if you called
 235    * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
 236    * program bug for it not to have the NO_RAW flag).
 237    * NB that returning false here doesn't necessarily mean that calling
 238    * read/write_raw_cp_reg() is safe, because we can't distinguish "has
 239    * read/write access functions which are safe for raw use" from "has
 240    * read/write access functions which have side effects but has forgotten
 241    * to provide raw access functions".
 242    * The tests here line up with the conditions in read/write_raw_cp_reg()
 243    * and assertions in raw_read()/raw_write().
 244    */
 245    if ((ri->type & ARM_CP_CONST) ||
 246        ri->fieldoffset ||
 247        ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
 248        return false;
 249    }
 250    return true;
 251}
 252
 253bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync)
 254{
 255    /* Write the coprocessor state from cpu->env to the (index,value) list. */
 256    int i;
 257    bool ok = true;
 258
 259    for (i = 0; i < cpu->cpreg_array_len; i++) {
 260        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
 261        const ARMCPRegInfo *ri;
 262        uint64_t newval;
 263
 264        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 265        if (!ri) {
 266            ok = false;
 267            continue;
 268        }
 269        if (ri->type & ARM_CP_NO_RAW) {
 270            continue;
 271        }
 272
 273        newval = read_raw_cp_reg(&cpu->env, ri);
 274        if (kvm_sync) {
 275            /*
 276             * Only sync if the previous list->cpustate sync succeeded.
 277             * Rather than tracking the success/failure state for every
 278             * item in the list, we just recheck "does the raw write we must
 279             * have made in write_list_to_cpustate() read back OK" here.
 280             */
 281            uint64_t oldval = cpu->cpreg_values[i];
 282
 283            if (oldval == newval) {
 284                continue;
 285            }
 286
 287            write_raw_cp_reg(&cpu->env, ri, oldval);
 288            if (read_raw_cp_reg(&cpu->env, ri) != oldval) {
 289                continue;
 290            }
 291
 292            write_raw_cp_reg(&cpu->env, ri, newval);
 293        }
 294        cpu->cpreg_values[i] = newval;
 295    }
 296    return ok;
 297}
 298
 299bool write_list_to_cpustate(ARMCPU *cpu)
 300{
 301    int i;
 302    bool ok = true;
 303
 304    for (i = 0; i < cpu->cpreg_array_len; i++) {
 305        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
 306        uint64_t v = cpu->cpreg_values[i];
 307        const ARMCPRegInfo *ri;
 308
 309        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 310        if (!ri) {
 311            ok = false;
 312            continue;
 313        }
 314        if (ri->type & ARM_CP_NO_RAW) {
 315            continue;
 316        }
 317        /* Write value and confirm it reads back as written
 318         * (to catch read-only registers and partially read-only
 319         * registers where the incoming migration value doesn't match)
 320         */
 321        write_raw_cp_reg(&cpu->env, ri, v);
 322        if (read_raw_cp_reg(&cpu->env, ri) != v) {
 323            ok = false;
 324        }
 325    }
 326    return ok;
 327}
 328
 329static void add_cpreg_to_list(gpointer key, gpointer opaque)
 330{
 331    ARMCPU *cpu = opaque;
 332    uint64_t regidx;
 333    const ARMCPRegInfo *ri;
 334
 335    regidx = *(uint32_t *)key;
 336    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 337
 338    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
 339        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
 340        /* The value array need not be initialized at this point */
 341        cpu->cpreg_array_len++;
 342    }
 343}
 344
 345static void count_cpreg(gpointer key, gpointer opaque)
 346{
 347    ARMCPU *cpu = opaque;
 348    uint64_t regidx;
 349    const ARMCPRegInfo *ri;
 350
 351    regidx = *(uint32_t *)key;
 352    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
 353
 354    if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
 355        cpu->cpreg_array_len++;
 356    }
 357}
 358
 359static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
 360{
 361    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
 362    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
 363
 364    if (aidx > bidx) {
 365        return 1;
 366    }
 367    if (aidx < bidx) {
 368        return -1;
 369    }
 370    return 0;
 371}
 372
 373void init_cpreg_list(ARMCPU *cpu)
 374{
 375    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
 376     * Note that we require cpreg_tuples[] to be sorted by key ID.
 377     */
 378    GList *keys;
 379    int arraylen;
 380
 381    keys = g_hash_table_get_keys(cpu->cp_regs);
 382    keys = g_list_sort(keys, cpreg_key_compare);
 383
 384    cpu->cpreg_array_len = 0;
 385
 386    g_list_foreach(keys, count_cpreg, cpu);
 387
 388    arraylen = cpu->cpreg_array_len;
 389    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
 390    cpu->cpreg_values = g_new(uint64_t, arraylen);
 391    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
 392    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
 393    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
 394    cpu->cpreg_array_len = 0;
 395
 396    g_list_foreach(keys, add_cpreg_to_list, cpu);
 397
 398    assert(cpu->cpreg_array_len == arraylen);
 399
 400    g_list_free(keys);
 401}
 402
 403/*
 404 * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
 405 * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
 406 *
 407 * access_el3_aa32ns: Used to check AArch32 register views.
 408 * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
 409 */
 410static CPAccessResult access_el3_aa32ns(CPUARMState *env,
 411                                        const ARMCPRegInfo *ri,
 412                                        bool isread)
 413{
 414    bool secure = arm_is_secure_below_el3(env);
 415
 416    assert(!arm_el_is_aa64(env, 3));
 417    if (secure) {
 418        return CP_ACCESS_TRAP_UNCATEGORIZED;
 419    }
 420    return CP_ACCESS_OK;
 421}
 422
 423static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
 424                                                const ARMCPRegInfo *ri,
 425                                                bool isread)
 426{
 427    if (!arm_el_is_aa64(env, 3)) {
 428        return access_el3_aa32ns(env, ri, isread);
 429    }
 430    return CP_ACCESS_OK;
 431}
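/*
 * Illustrative usage (hypothetical register definition, added for clarity):
 * a register that must UNDEF at Secure EL1 when EL3 is AArch32 would hook
 * one of the accessors above, e.g.
 *
 *     { .name = "SOME_EL2_REG", .access = PL1_RW,
 *       .accessfn = access_el3_aa32ns, ... },
 *
 * The real register definitions that use these accessors appear later in
 * this file.
 */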
 432
 433/* Some secure-only AArch32 registers trap to EL3 if used from
 434 * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
 435 * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
 436 * We assume that the .access field is set to PL1_RW.
 437 */
 438static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
 439                                            const ARMCPRegInfo *ri,
 440                                            bool isread)
 441{
 442    if (arm_current_el(env) == 3) {
 443        return CP_ACCESS_OK;
 444    }
 445    if (arm_is_secure_below_el3(env)) {
 446        return CP_ACCESS_TRAP_EL3;
 447    }
 448    /* This will be EL1 NS and EL2 NS, which just UNDEF */
 449    return CP_ACCESS_TRAP_UNCATEGORIZED;
 450}
 451
 452/* Check for traps to "powerdown debug" registers, which are controlled
 453 * by MDCR.TDOSA
 454 */
 455static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
 456                                   bool isread)
 457{
 458    int el = arm_current_el(env);
 459    bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
 460        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 461        (arm_hcr_el2_eff(env) & HCR_TGE);
 462
 463    if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
 464        return CP_ACCESS_TRAP_EL2;
 465    }
 466    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
 467        return CP_ACCESS_TRAP_EL3;
 468    }
 469    return CP_ACCESS_OK;
 470}
 471
 472/* Check for traps to "debug ROM" registers, which are controlled
 473 * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
 474 */
 475static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
 476                                  bool isread)
 477{
 478    int el = arm_current_el(env);
 479    bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
 480        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 481        (arm_hcr_el2_eff(env) & HCR_TGE);
 482
 483    if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
 484        return CP_ACCESS_TRAP_EL2;
 485    }
 486    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
 487        return CP_ACCESS_TRAP_EL3;
 488    }
 489    return CP_ACCESS_OK;
 490}
 491
 492/* Check for traps to general debug registers, which are controlled
 493 * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
 494 */
 495static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
 496                                  bool isread)
 497{
 498    int el = arm_current_el(env);
 499    bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
 500        (env->cp15.mdcr_el2 & MDCR_TDE) ||
 501        (arm_hcr_el2_eff(env) & HCR_TGE);
 502
 503    if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
 504        return CP_ACCESS_TRAP_EL2;
 505    }
 506    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
 507        return CP_ACCESS_TRAP_EL3;
 508    }
 509    return CP_ACCESS_OK;
 510}
 511
 512/* Check for traps to performance monitor registers, which are controlled
 513 * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
 514 */
 515static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
 516                                 bool isread)
 517{
 518    int el = arm_current_el(env);
 519
 520    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
 521        && !arm_is_secure_below_el3(env)) {
 522        return CP_ACCESS_TRAP_EL2;
 523    }
 524    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
 525        return CP_ACCESS_TRAP_EL3;
 526    }
 527    return CP_ACCESS_OK;
 528}
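/*
 * Worked example (added for clarity): with the trap-check helpers above, an
 * access to a PMU register from non-secure EL1 while MDCR_EL2.TPM is set
 * returns CP_ACCESS_TRAP_EL2; the same access from EL3, or with neither TPM
 * bit set, returns CP_ACCESS_OK.
 */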
 529
 530static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 531{
 532    ARMCPU *cpu = env_archcpu(env);
 533
 534    raw_write(env, ri, value);
 535    tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */
 536}
 537
 538static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
 539{
 540    ARMCPU *cpu = env_archcpu(env);
 541
 542    if (raw_read(env, ri) != value) {
 543        /* Unlike real hardware the qemu TLB uses virtual addresses,
 544         * not modified virtual addresses, so this causes a TLB flush.
 545         */
 546        tlb_flush(CPU(cpu));
 547        raw_write(env, ri, value);
 548    }
 549}
 550
 551static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 552                             uint64_t value)
 553{
 554    ARMCPU *cpu = env_archcpu(env);
 555
 556    if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA)
 557        && !extended_addresses_enabled(env)) {
 558        /* For VMSA (when not using the LPAE long descriptor page table
 559         * format) this register includes the ASID, so do a TLB flush.
 560         * For PMSA it is purely a process ID and no action is needed.
 561         */
 562        tlb_flush(CPU(cpu));
 563    }
 564    raw_write(env, ri, value);
 565}
 566
 567/* IS variants of TLB operations must affect all cores */
 568static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 569                             uint64_t value)
 570{
 571    CPUState *cs = env_cpu(env);
 572
 573    tlb_flush_all_cpus_synced(cs);
 574}
 575
 576static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 577                             uint64_t value)
 578{
 579    CPUState *cs = env_cpu(env);
 580
 581    tlb_flush_all_cpus_synced(cs);
 582}
 583
 584static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 585                             uint64_t value)
 586{
 587    CPUState *cs = env_cpu(env);
 588
 589    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 590}
 591
 592static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 593                             uint64_t value)
 594{
 595    CPUState *cs = env_cpu(env);
 596
 597    tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK);
 598}
 599
 600/*
 601 * Non-IS variants of TLB operations are upgraded to
 602 * IS versions if we are at NS EL1 and HCR_EL2.FB is set to
 603 * force broadcast of these operations.
 604 */
 605static bool tlb_force_broadcast(CPUARMState *env)
 606{
 607    return (env->cp15.hcr_el2 & HCR_FB) &&
 608        arm_current_el(env) == 1 && arm_is_secure_below_el3(env);
 609}
 610
 611static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
 612                          uint64_t value)
 613{
 614    /* Invalidate all (TLBIALL) */
 615    ARMCPU *cpu = env_archcpu(env);
 616
 617    if (tlb_force_broadcast(env)) {
 618        tlbiall_is_write(env, NULL, value);
 619        return;
 620    }
 621
 622    tlb_flush(CPU(cpu));
 623}
 624
 625static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
 626                          uint64_t value)
 627{
 628    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
 629    ARMCPU *cpu = env_archcpu(env);
 630
 631    if (tlb_force_broadcast(env)) {
 632        tlbimva_is_write(env, NULL, value);
 633        return;
 634    }
 635
 636    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
 637}
 638
 639static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
 640                           uint64_t value)
 641{
 642    /* Invalidate by ASID (TLBIASID) */
 643    ARMCPU *cpu = env_archcpu(env);
 644
 645    if (tlb_force_broadcast(env)) {
 646        tlbiasid_is_write(env, NULL, value);
 647        return;
 648    }
 649
 650    tlb_flush(CPU(cpu));
 651}
 652
 653static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
 654                           uint64_t value)
 655{
 656    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
 657    ARMCPU *cpu = env_archcpu(env);
 658
 659    if (tlb_force_broadcast(env)) {
 660        tlbimvaa_is_write(env, NULL, value);
 661        return;
 662    }
 663
 664    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
 665}
 666
 667static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
 668                               uint64_t value)
 669{
 670    CPUState *cs = env_cpu(env);
 671
 672    tlb_flush_by_mmuidx(cs,
 673                        ARMMMUIdxBit_S12NSE1 |
 674                        ARMMMUIdxBit_S12NSE0 |
 675                        ARMMMUIdxBit_S2NS);
 676}
 677
 678static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 679                                  uint64_t value)
 680{
 681    CPUState *cs = env_cpu(env);
 682
 683    tlb_flush_by_mmuidx_all_cpus_synced(cs,
 684                                        ARMMMUIdxBit_S12NSE1 |
 685                                        ARMMMUIdxBit_S12NSE0 |
 686                                        ARMMMUIdxBit_S2NS);
 687}
 688
 689static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
 690                            uint64_t value)
 691{
 692    /* Invalidate by IPA. This has to invalidate any structures that
 693     * contain only stage 2 translation information, but does not need
 694     * to apply to structures that contain combined stage 1 and stage 2
 695     * translation information.
 696     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
 697     */
 698    CPUState *cs = env_cpu(env);
 699    uint64_t pageaddr;
 700
 701    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
 702        return;
 703    }
 704
 705    pageaddr = sextract64(value << 12, 0, 40);
 706
 707    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
 708}
 709
 710static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 711                               uint64_t value)
 712{
 713    CPUState *cs = env_cpu(env);
 714    uint64_t pageaddr;
 715
 716    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
 717        return;
 718    }
 719
 720    pageaddr = sextract64(value << 12, 0, 40);
 721
 722    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
 723                                             ARMMMUIdxBit_S2NS);
 724}
 725
 726static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
 727                              uint64_t value)
 728{
 729    CPUState *cs = env_cpu(env);
 730
 731    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
 732}
 733
 734static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 735                                 uint64_t value)
 736{
 737    CPUState *cs = env_cpu(env);
 738
 739    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
 740}
 741
 742static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
 743                              uint64_t value)
 744{
 745    CPUState *cs = env_cpu(env);
 746    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
 747
 748    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
 749}
 750
 751static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
 752                                 uint64_t value)
 753{
 754    CPUState *cs = env_cpu(env);
 755    uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
 756
 757    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
 758                                             ARMMMUIdxBit_S1E2);
 759}
 760
 761static const ARMCPRegInfo cp_reginfo[] = {
 762    /* Define the secure and non-secure FCSE identifier CP registers
 763     * separately because there is no secure bank in V8 (no _EL3).  This allows
 764     * the secure register to be properly reset and migrated. There is also no
 765     * v8 EL1 version of the register so the non-secure instance stands alone.
 766     */
 767    { .name = "FCSEIDR",
 768      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
 769      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
 770      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
 771      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
 772    { .name = "FCSEIDR_S",
 773      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
 774      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
 775      .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
 776      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
 777    /* Define the secure and non-secure context identifier CP registers
 778     * separately because there is no secure bank in V8 (no _EL3).  This allows
 779     * the secure register to be properly reset and migrated.  In the
 780     * non-secure case, the 32-bit register will have reset and migration
 781     * disabled during registration as it is handled by the 64-bit instance.
 782     */
 783    { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
 784      .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
 785      .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
 786      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
 787      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
 788    { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32,
 789      .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
 790      .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
 791      .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
 792      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
 793    REGINFO_SENTINEL
 794};
 795
 796static const ARMCPRegInfo not_v8_cp_reginfo[] = {
 797    /* NB: Some of these registers exist in v8 but with more precise
 798     * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
 799     */
 800    /* MMU Domain access control / MPU write buffer control */
 801    { .name = "DACR",
 802      .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
 803      .access = PL1_RW, .resetvalue = 0,
 804      .writefn = dacr_write, .raw_writefn = raw_write,
 805      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
 806                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
 807    /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
 808     * For v6 and v5, these mappings are overly broad.
 809     */
 810    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
 811      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 812    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
 813      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 814    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
 815      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 816    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
 817      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
 818    /* Cache maintenance ops; some of this space may be overridden later. */
 819    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
 820      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
 821      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
 822    REGINFO_SENTINEL
 823};
 824
 825static const ARMCPRegInfo not_v6_cp_reginfo[] = {
 826    /* Not all pre-v6 cores implemented this WFI, so this is slightly
 827     * over-broad.
 828     */
 829    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
 830      .access = PL1_W, .type = ARM_CP_WFI },
 831    REGINFO_SENTINEL
 832};
 833
 834static const ARMCPRegInfo not_v7_cp_reginfo[] = {
 835    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
 836     * is UNPREDICTABLE; we choose to NOP as most implementations do).
 837     */
 838    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
 839      .access = PL1_W, .type = ARM_CP_WFI },
 840    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
 841     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
 842     * OMAPCP will override this space.
 843     */
 844    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
 845      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
 846      .resetvalue = 0 },
 847    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
 848      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
 849      .resetvalue = 0 },
 850    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
 851    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
 852      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
 853      .resetvalue = 0 },
 854    /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
 855     * implementing it as RAZ means the "debug architecture version" bits
 856     * will read as a reserved value, which should cause Linux to not try
 857     * to use the debug hardware.
 858     */
 859    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
 860      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
 861    /* MMU TLB control. Note that the wildcarding means we cover not just
 862     * the unified TLB ops but also the dside/iside/inner-shareable variants.
 863     */
 864    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
 865      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
 866      .type = ARM_CP_NO_RAW },
 867    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
 868      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
 869      .type = ARM_CP_NO_RAW },
 870    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
 871      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
 872      .type = ARM_CP_NO_RAW },
 873    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
 874      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
 875      .type = ARM_CP_NO_RAW },
 876    { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
 877      .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
 878    { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
 879      .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
 880    REGINFO_SENTINEL
 881};
 882
 883static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 884                        uint64_t value)
 885{
 886    uint32_t mask = 0;
 887
 888    /* In ARMv8 most bits of CPACR_EL1 are RES0. */
 889    if (!arm_feature(env, ARM_FEATURE_V8)) {
 890        /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
 891         * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
 892         * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
 893         */
 894        if (arm_feature(env, ARM_FEATURE_VFP)) {
 895            /* VFP coprocessor: cp10 & cp11 [23:20] */
 896            mask |= (1 << 31) | (1 << 30) | (0xf << 20);
 897
 898            if (!arm_feature(env, ARM_FEATURE_NEON)) {
 899                /* ASEDIS [31] bit is RAO/WI */
 900                value |= (1 << 31);
 901            }
 902
 903            /* VFPv3 and upwards with NEON implement 32 double precision
 904             * registers (D0-D31).
 905             */
 906            if (!arm_feature(env, ARM_FEATURE_NEON) ||
 907                    !arm_feature(env, ARM_FEATURE_VFP3)) {
 908                /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
 909                value |= (1 << 30);
 910            }
 911        }
 912        value &= mask;
 913    }
 914
 915    /*
 916     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
 917     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
 918     */
 919    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
 920        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
 921        value &= ~(0xf << 20);
 922        value |= env->cp15.cpacr_el1 & (0xf << 20);
 923    }
 924
 925    env->cp15.cpacr_el1 = value;
 926}
 927
 928static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 929{
 930    /*
 931     * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10
 932     * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00.
 933     */
 934    uint64_t value = env->cp15.cpacr_el1;
 935
 936    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
 937        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
 938        value &= ~(0xf << 20);
 939    }
 940    return value;
 941}
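/*
 * Worked example (added for clarity): on an AArch32 EL3 system in
 * Non-secure state with NSACR.CP10 == 0, a write of 0xf << 20 through
 * cpacr_write() leaves CPACR.{CP11,CP10} at their previous stored value,
 * and cpacr_read() reports those fields as 0b00, matching the "ignore
 * writes, read as zero" behaviour described above.
 */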
 942
 943
 944static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
 945{
 946    /* Call cpacr_write() so that we reset with the correct RAO bits set
 947     * for our CPU features.
 948     */
 949    cpacr_write(env, ri, 0);
 950}
 951
 952static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
 953                                   bool isread)
 954{
 955    if (arm_feature(env, ARM_FEATURE_V8)) {
 956        /* Check if CPACR accesses are to be trapped to EL2 */
 957        if (arm_current_el(env) == 1 &&
 958            (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
 959            return CP_ACCESS_TRAP_EL2;
 960        /* Check if CPACR accesses are to be trapped to EL3 */
 961        } else if (arm_current_el(env) < 3 &&
 962                   (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
 963            return CP_ACCESS_TRAP_EL3;
 964        }
 965    }
 966
 967    return CP_ACCESS_OK;
 968}
 969
 970static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
 971                                  bool isread)
 972{
 973    /* Check if CPTR accesses are set to trap to EL3 */
 974    if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
 975        return CP_ACCESS_TRAP_EL3;
 976    }
 977
 978    return CP_ACCESS_OK;
 979}
 980
 981static const ARMCPRegInfo v6_cp_reginfo[] = {
 982    /* prefetch by MVA in v6, NOP in v7 */
 983    { .name = "MVA_prefetch",
 984      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
 985      .access = PL1_W, .type = ARM_CP_NOP },
 986    /* We need to break the TB after ISB to execute self-modifying code
 987     * correctly and also to take any pending interrupts immediately.
 988     * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag.
 989     */
 990    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
 991      .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
 992    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
 993      .access = PL0_W, .type = ARM_CP_NOP },
 994    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
 995      .access = PL0_W, .type = ARM_CP_NOP },
 996    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
 997      .access = PL1_RW,
 998      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
 999                             offsetof(CPUARMState, cp15.ifar_ns) },
1000      .resetvalue = 0, },
1001    /* Watchpoint Fault Address Register : should actually only be present
1002     * for 1136, 1176, 11MPCore.
1003     */
1004    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
1005      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
1006    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
1007      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
1008      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
1009      .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
1010    REGINFO_SENTINEL
1011};
1012
1013/* Definitions for the PMU registers */
1014#define PMCRN_MASK  0xf800
1015#define PMCRN_SHIFT 11
1016#define PMCRLC  0x40
1017#define PMCRDP  0x10
1018#define PMCRD   0x8
1019#define PMCRC   0x4
1020#define PMCRP   0x2
1021#define PMCRE   0x1
1022
1023#define PMXEVTYPER_P          0x80000000
1024#define PMXEVTYPER_U          0x40000000
1025#define PMXEVTYPER_NSK        0x20000000
1026#define PMXEVTYPER_NSU        0x10000000
1027#define PMXEVTYPER_NSH        0x08000000
1028#define PMXEVTYPER_M          0x04000000
1029#define PMXEVTYPER_MT         0x02000000
1030#define PMXEVTYPER_EVTCOUNT   0x0000ffff
1031#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
1032                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
1033                               PMXEVTYPER_M | PMXEVTYPER_MT | \
1034                               PMXEVTYPER_EVTCOUNT)
1035
1036#define PMCCFILTR             0xf8000000
1037#define PMCCFILTR_M           PMXEVTYPER_M
1038#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)
1039
1040static inline uint32_t pmu_num_counters(CPUARMState *env)
1041{
1042  return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
1043}
1044
1045/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
1046static inline uint64_t pmu_counter_mask(CPUARMState *env)
1047{
1048  return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
1049}
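/*
 * Worked example (added for clarity): if PMCR.N is 4, pmu_num_counters()
 * returns 4 and pmu_counter_mask() returns 0x8000000f, i.e. bit 31 for
 * PMCCNTR plus one bit per implemented event counter.
 */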
1050
1051typedef struct pm_event {
1052    uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
1053    /* If the event is supported on this CPU (used to generate PMCEID[01]) */
1054    bool (*supported)(CPUARMState *);
1055    /*
1056     * Retrieve the current count of the underlying event. The programmed
1057     * counters hold a difference from the return value from this function
1058     */
1059    uint64_t (*get_count)(CPUARMState *);
1060    /*
1061     * Return how many nanoseconds it will take (at a minimum) for count events
1062     * to occur. A negative value indicates the counter will never overflow, or
1063     * that the counter has otherwise arranged for the overflow bit to be set
1064     * and the PMU interrupt to be raised on overflow.
1065     */
1066    int64_t (*ns_per_count)(uint64_t);
1067} pm_event;
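/*
 * Note (added for clarity): the "difference" scheme above means a counter
 * register holds get_count() minus a per-counter delta. For example, if the
 * underlying count is 1000 and the stored delta is 400, the guest sees 600;
 * re-basing that delta is how the op_start/op_finish helpers further down
 * freeze and resume counters without touching the underlying clock.
 */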
1068
1069static bool event_always_supported(CPUARMState *env)
1070{
1071    return true;
1072}
1073
1074static uint64_t swinc_get_count(CPUARMState *env)
1075{
1076    /*
1077     * SW_INCR events are written directly to the pmevcntr's by writes to
1078     * PMSWINC, so there is no underlying count maintained by the PMU itself
1079     */
1080    return 0;
1081}
1082
1083static int64_t swinc_ns_per(uint64_t ignored)
1084{
1085    return -1;
1086}
1087
1088/*
1089 * Return the underlying cycle count for the PMU cycle counters. If we're in
1090 * usermode, simply return 0.
1091 */
1092static uint64_t cycles_get_count(CPUARMState *env)
1093{
1094#ifndef CONFIG_USER_ONLY
1095    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
1096                   ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
1097#else
1098    return cpu_get_host_ticks();
1099#endif
1100}
1101
1102#ifndef CONFIG_USER_ONLY
1103static int64_t cycles_ns_per(uint64_t cycles)
1104{
 1105    return muldiv64(cycles, NANOSECONDS_PER_SECOND, ARM_CPU_FREQ);
1106}
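/*
 * Worked example (added for clarity): with ARM_CPU_FREQ fixed at 1 GHz,
 * one emulated cycle corresponds to one nanosecond of QEMU_CLOCK_VIRTUAL,
 * so cycles_get_count() after 2500 ns reports 2500 cycles and
 * cycles_ns_per(2500) reports 2500 ns. With a different (configurable)
 * frequency, cycles_get_count() would scale nanoseconds by
 * ARM_CPU_FREQ / NANOSECONDS_PER_SECOND and cycles_ns_per() by the inverse.
 */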
1107
1108static bool instructions_supported(CPUARMState *env)
1109{
1110    return use_icount == 1 /* Precise instruction counting */;
1111}
1112
1113static uint64_t instructions_get_count(CPUARMState *env)
1114{
1115    return (uint64_t)cpu_get_icount_raw();
1116}
1117
1118static int64_t instructions_ns_per(uint64_t icount)
1119{
1120    return cpu_icount_to_ns((int64_t)icount);
1121}
1122#endif
1123
1124static const pm_event pm_events[] = {
1125    { .number = 0x000, /* SW_INCR */
1126      .supported = event_always_supported,
1127      .get_count = swinc_get_count,
1128      .ns_per_count = swinc_ns_per,
1129    },
1130#ifndef CONFIG_USER_ONLY
1131    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
1132      .supported = instructions_supported,
1133      .get_count = instructions_get_count,
1134      .ns_per_count = instructions_ns_per,
1135    },
1136    { .number = 0x011, /* CPU_CYCLES, Cycle */
1137      .supported = event_always_supported,
1138      .get_count = cycles_get_count,
1139      .ns_per_count = cycles_ns_per,
1140    }
1141#endif
1142};
1143
1144/*
1145 * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
1146 * events (i.e. the statistical profiling extension), this implementation
1147 * should first be updated to something sparse instead of the current
1148 * supported_event_map[] array.
1149 */
1150#define MAX_EVENT_ID 0x11
1151#define UNSUPPORTED_EVENT UINT16_MAX
1152static uint16_t supported_event_map[MAX_EVENT_ID + 1];
1153
1154/*
1155 * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
1156 * of ARM event numbers to indices in our pm_events array.
1157 *
1158 * Note: Events in the 0x40XX range are not currently supported.
1159 */
1160void pmu_init(ARMCPU *cpu)
1161{
1162    unsigned int i;
1163
1164    /*
1165     * Empty supported_event_map and cpu->pmceid[01] before adding supported
1166     * events to them
1167     */
1168    for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
1169        supported_event_map[i] = UNSUPPORTED_EVENT;
1170    }
1171    cpu->pmceid0 = 0;
1172    cpu->pmceid1 = 0;
1173
1174    for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
1175        const pm_event *cnt = &pm_events[i];
1176        assert(cnt->number <= MAX_EVENT_ID);
1177        /* We do not currently support events in the 0x40xx range */
1178        assert(cnt->number <= 0x3f);
1179
1180        if (cnt->supported(&cpu->env)) {
1181            supported_event_map[cnt->number] = i;
1182            uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
1183            if (cnt->number & 0x20) {
1184                cpu->pmceid1 |= event_mask;
1185            } else {
1186                cpu->pmceid0 |= event_mask;
1187            }
1188        }
1189    }
1190}
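/*
 * Worked example (added for clarity): CPU_CYCLES is event 0x011, so when it
 * is supported pmu_init() sets bit 17 (0x11 & 0x1f) of PMCEID0. An event
 * numbered 0x20-0x3f would instead set the corresponding bit of PMCEID1.
 */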
1191
1192/*
1193 * Check at runtime whether a PMU event is supported for the current machine
1194 */
1195static bool event_supported(uint16_t number)
1196{
1197    if (number > MAX_EVENT_ID) {
1198        return false;
1199    }
1200    return supported_event_map[number] != UNSUPPORTED_EVENT;
1201}
1202
1203static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
1204                                   bool isread)
1205{
1206    /* Performance monitor registers user accessibility is controlled
1207     * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
1208     * trapping to EL2 or EL3 for other accesses.
1209     */
1210    int el = arm_current_el(env);
1211
1212    if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
1213        return CP_ACCESS_TRAP;
1214    }
1215    if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
1216        && !arm_is_secure_below_el3(env)) {
1217        return CP_ACCESS_TRAP_EL2;
1218    }
1219    if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
1220        return CP_ACCESS_TRAP_EL3;
1221    }
1222
1223    return CP_ACCESS_OK;
1224}
1225
1226static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
1227                                           const ARMCPRegInfo *ri,
1228                                           bool isread)
1229{
1230    /* ER: event counter read trap control */
1231    if (arm_feature(env, ARM_FEATURE_V8)
1232        && arm_current_el(env) == 0
1233        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
1234        && isread) {
1235        return CP_ACCESS_OK;
1236    }
1237
1238    return pmreg_access(env, ri, isread);
1239}
1240
1241static CPAccessResult pmreg_access_swinc(CPUARMState *env,
1242                                         const ARMCPRegInfo *ri,
1243                                         bool isread)
1244{
1245    /* SW: software increment write trap control */
1246    if (arm_feature(env, ARM_FEATURE_V8)
1247        && arm_current_el(env) == 0
1248        && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
1249        && !isread) {
1250        return CP_ACCESS_OK;
1251    }
1252
1253    return pmreg_access(env, ri, isread);
1254}
1255
1256static CPAccessResult pmreg_access_selr(CPUARMState *env,
1257                                        const ARMCPRegInfo *ri,
1258                                        bool isread)
1259{
1260    /* ER: event counter read trap control */
1261    if (arm_feature(env, ARM_FEATURE_V8)
1262        && arm_current_el(env) == 0
1263        && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
1264        return CP_ACCESS_OK;
1265    }
1266
1267    return pmreg_access(env, ri, isread);
1268}
1269
1270static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
1271                                         const ARMCPRegInfo *ri,
1272                                         bool isread)
1273{
1274    /* CR: cycle counter read trap control */
1275    if (arm_feature(env, ARM_FEATURE_V8)
1276        && arm_current_el(env) == 0
1277        && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
1278        && isread) {
1279        return CP_ACCESS_OK;
1280    }
1281
1282    return pmreg_access(env, ri, isread);
1283}
1284
1285/* Returns true if the counter (pass 31 for PMCCNTR) should count events using
1286 * the current EL, security state, and register configuration.
1287 */
1288static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
1289{
1290    uint64_t filter;
1291    bool e, p, u, nsk, nsu, nsh, m;
1292    bool enabled, prohibited, filtered;
1293    bool secure = arm_is_secure(env);
1294    int el = arm_current_el(env);
1295    uint8_t hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
1296
1297    if (!arm_feature(env, ARM_FEATURE_PMU)) {
1298        return false;
1299    }
1300
1301    if (!arm_feature(env, ARM_FEATURE_EL2) ||
1302            (counter < hpmn || counter == 31)) {
1303        e = env->cp15.c9_pmcr & PMCRE;
1304    } else {
1305        e = env->cp15.mdcr_el2 & MDCR_HPME;
1306    }
1307    enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
1308
1309    if (!secure) {
1310        if (el == 2 && (counter < hpmn || counter == 31)) {
1311            prohibited = env->cp15.mdcr_el2 & MDCR_HPMD;
1312        } else {
1313            prohibited = false;
1314        }
1315    } else {
1316        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
 1317           !(env->cp15.mdcr_el3 & MDCR_SPME);
1318    }
1319
1320    if (prohibited && counter == 31) {
1321        prohibited = env->cp15.c9_pmcr & PMCRDP;
1322    }
1323
1324    if (counter == 31) {
1325        filter = env->cp15.pmccfiltr_el0;
1326    } else {
1327        filter = env->cp15.c14_pmevtyper[counter];
1328    }
1329
1330    p   = filter & PMXEVTYPER_P;
1331    u   = filter & PMXEVTYPER_U;
1332    nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
1333    nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
1334    nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
1335    m   = arm_el_is_aa64(env, 1) &&
1336              arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
1337
1338    if (el == 0) {
1339        filtered = secure ? u : u != nsu;
1340    } else if (el == 1) {
1341        filtered = secure ? p : p != nsk;
1342    } else if (el == 2) {
1343        filtered = !nsh;
1344    } else { /* EL3 */
1345        filtered = m != p;
1346    }
1347
1348    if (counter != 31) {
1349        /*
 1350         * If not checking PMCCNTR, ensure the counter is set up to count an
 1351         * event we support.
1352         */
1353        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
1354        if (!event_supported(event)) {
1355            return false;
1356        }
1357    }
1358
1359    return enabled && !prohibited && !filtered;
1360}
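/*
 * Worked example (added for clarity): at non-secure EL0 the filter test
 * above is "u != nsu", so an event whose PMEVTYPER has U=1 and NSU=0 is
 * filtered out at NS EL0, while U=1, NSU=1 counts at NS EL0 but is filtered
 * at Secure EL0 (where the U bit alone applies).
 */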
1361
1362static void pmu_update_irq(CPUARMState *env)
1363{
1364    ARMCPU *cpu = env_archcpu(env);
1365    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
1366            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
1367}
1368
1369/*
1370 * Ensure c15_ccnt is the guest-visible count so that operations such as
1371 * enabling/disabling the counter or filtering, modifying the count itself,
1372 * etc. can be done logically. This is essentially a no-op if the counter is
1373 * not enabled at the time of the call.
1374 */
1375static void pmccntr_op_start(CPUARMState *env)
1376{
1377    uint64_t cycles = cycles_get_count(env);
1378
1379    if (pmu_counter_enabled(env, 31)) {
1380        uint64_t eff_cycles = cycles;
1381        if (env->cp15.c9_pmcr & PMCRD) {
1382            /* Increment once every 64 processor clock cycles */
1383            eff_cycles /= 64;
1384        }
1385
1386        uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
1387
1388        uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
1389                                 1ull << 63 : 1ull << 31;
1390        if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
1391            env->cp15.c9_pmovsr |= (1 << 31);
1392            pmu_update_irq(env);
1393        }
1394
1395        env->cp15.c15_ccnt = new_pmccntr;
1396    }
1397    env->cp15.c15_ccnt_delta = cycles;
1398}
1399
1400/*
1401 * If PMCCNTR is enabled, recalculate the delta between the clock and the
1402 * guest-visible count. A call to pmccntr_op_finish should follow every call to
1403 * pmccntr_op_start.
1404 */
1405static void pmccntr_op_finish(CPUARMState *env)
1406{
1407    if (pmu_counter_enabled(env, 31)) {
1408#ifndef CONFIG_USER_ONLY
1409        /* Calculate when the counter will next overflow */
1410        uint64_t remaining_cycles = -env->cp15.c15_ccnt;
1411        if (!(env->cp15.c9_pmcr & PMCRLC)) {
1412            remaining_cycles = (uint32_t)remaining_cycles;
1413        }
1414        int64_t overflow_in = cycles_ns_per(remaining_cycles);
1415
1416        if (overflow_in > 0) {
1417            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1418                overflow_in;
1419            ARMCPU *cpu = env_archcpu(env);
1420            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1421        }
1422#endif
1423
1424        uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
1425        if (env->cp15.c9_pmcr & PMCRD) {
1426            /* Increment once every 64 processor clock cycles */
1427            prev_cycles /= 64;
1428        }
1429        env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
1430    }
1431}
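/*
 * Worked example (added for clarity, PMCRD clear): if the underlying cycle
 * count is 1000 and c15_ccnt_delta is 400, pmccntr_op_start() exposes a
 * guest-visible PMCCNTR of 600. If the guest then writes 100 to PMCCNTR,
 * pmccntr_op_finish() re-bases the delta to 1000 - 100 = 900, so later
 * reads return 100 plus however many cycles have elapsed since.
 */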
1432
1433static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
1434{
1435
1436    uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1437    uint64_t count = 0;
1438    if (event_supported(event)) {
1439        uint16_t event_idx = supported_event_map[event];
1440        count = pm_events[event_idx].get_count(env);
1441    }
1442
1443    if (pmu_counter_enabled(env, counter)) {
1444        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
1445
1446        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
1447            env->cp15.c9_pmovsr |= (1 << counter);
1448            pmu_update_irq(env);
1449        }
1450        env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
1451    }
1452    env->cp15.c14_pmevcntr_delta[counter] = count;
1453}
1454
1455static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
1456{
1457    if (pmu_counter_enabled(env, counter)) {
1458#ifndef CONFIG_USER_ONLY
1459        uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
1460        uint16_t event_idx = supported_event_map[event];
1461        uint64_t delta = UINT32_MAX -
1462            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
1463        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
1464
1465        if (overflow_in > 0) {
1466            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
1467                overflow_in;
1468            ARMCPU *cpu = env_archcpu(env);
1469            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
1470        }
1471#endif
1472
1473        env->cp15.c14_pmevcntr_delta[counter] -=
1474            env->cp15.c14_pmevcntr[counter];
1475    }
1476}
1477
1478void pmu_op_start(CPUARMState *env)
1479{
1480    unsigned int i;
1481    pmccntr_op_start(env);
1482    for (i = 0; i < pmu_num_counters(env); i++) {
1483        pmevcntr_op_start(env, i);
1484    }
1485}
1486
1487void pmu_op_finish(CPUARMState *env)
1488{
1489    unsigned int i;
1490    pmccntr_op_finish(env);
1491    for (i = 0; i < pmu_num_counters(env); i++) {
1492        pmevcntr_op_finish(env, i);
1493    }
1494}
1495
1496void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
1497{
1498    pmu_op_start(&cpu->env);
1499}
1500
1501void pmu_post_el_change(ARMCPU *cpu, void *ignored)
1502{
1503    pmu_op_finish(&cpu->env);
1504}
1505
1506void arm_pmu_timer_cb(void *opaque)
1507{
1508    ARMCPU *cpu = opaque;
1509
1510    /*
1511     * Update all the counter values based on the current underlying counts,
1512     * triggering interrupts to be raised, if necessary. pmu_op_finish() also
1513     * has the effect of setting the cpu->pmu_timer to the next earliest time a
1514     * counter may expire.
1515     */
1516    pmu_op_start(&cpu->env);
1517    pmu_op_finish(&cpu->env);
1518}
1519
1520static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1521                       uint64_t value)
1522{
1523    pmu_op_start(env);
1524
1525    if (value & PMCRC) {
1526        /* The counter has been reset */
1527        env->cp15.c15_ccnt = 0;
1528    }
1529
1530    if (value & PMCRP) {
1531        unsigned int i;
1532        for (i = 0; i < pmu_num_counters(env); i++) {
1533            env->cp15.c14_pmevcntr[i] = 0;
1534        }
1535    }
1536
1537    /* only the DP, X, D and E bits are writable */
1538    env->cp15.c9_pmcr &= ~0x39;
1539    env->cp15.c9_pmcr |= (value & 0x39);
1540
1541    pmu_op_finish(env);
1542}
1543
1544static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
1545                          uint64_t value)
1546{
1547    unsigned int i;
1548    for (i = 0; i < pmu_num_counters(env); i++) {
1549        /* Increment a counter's count iff: */
1550        if ((value & (1 << i)) && /* counter's bit is set */
1551                /* counter is enabled and not filtered */
1552                pmu_counter_enabled(env, i) &&
1553                /* counter is SW_INCR */
1554                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
1555            pmevcntr_op_start(env, i);
1556
1557            /*
1558             * Detect if this write causes an overflow since we can't predict
1559             * PMSWINC overflows like we can for other events
1560             */
1561            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
1562
1563            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
1564                env->cp15.c9_pmovsr |= (1 << i);
1565                pmu_update_irq(env);
1566            }
1567
1568            env->cp15.c14_pmevcntr[i] = new_pmswinc;
1569
1570            pmevcntr_op_finish(env, i);
1571        }
1572    }
1573}
1574
1575static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1576{
1577    uint64_t ret;
1578    pmccntr_op_start(env);
1579    ret = env->cp15.c15_ccnt;
1580    pmccntr_op_finish(env);
1581    return ret;
1582}
1583
1584static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1585                         uint64_t value)
1586{
1587    /* The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
1588     * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; the
1589     * PMSELR.SEL value is then checked when PMXEVTYPER and PMXEVCNTR are
1590     * actually accessed.
1591     */
1592    env->cp15.c9_pmselr = value & 0x1f;
1593}
1594
1595static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1596                        uint64_t value)
1597{
1598    pmccntr_op_start(env);
1599    env->cp15.c15_ccnt = value;
1600    pmccntr_op_finish(env);
1601}
1602
1603static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
1604                            uint64_t value)
1605{
1606    uint64_t cur_val = pmccntr_read(env, NULL);
1607
1608    pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
1609}
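/*
 * deposit64(cur_val, 0, 32, value) replaces only bits [31:0] of the 64-bit
 * cycle count, so a 32-bit AArch32 write to PMCCNTR leaves bits [63:32]
 * unchanged; e.g. 0x0000000123456789 written with 0xdeadbeef becomes
 * 0x00000001deadbeef.
 */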
1610
1611static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1612                            uint64_t value)
1613{
1614    pmccntr_op_start(env);
1615    env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
1616    pmccntr_op_finish(env);
1617}
1618
1619static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
1620                            uint64_t value)
1621{
1622    pmccntr_op_start(env);
1623    /* M is not accessible from AArch32 */
1624    env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
1625        (value & PMCCFILTR);
1626    pmccntr_op_finish(env);
1627}
1628
1629static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
1630{
1631    /* M is not visible in AArch32 */
1632    return env->cp15.pmccfiltr_el0 & PMCCFILTR;
1633}
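/*
 * PMCCFILTR_EL0.M (filtering of cycle counting in Secure EL3) has no
 * AArch32 counterpart, which is why the 32-bit accessors above preserve it
 * on writes and hide it on reads.
 */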
1634
1635static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1636                            uint64_t value)
1637{
1638    value &= pmu_counter_mask(env);
1639    env->cp15.c9_pmcnten |= value;
1640}
1641
1642static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1643                             uint64_t value)
1644{
1645    value &= pmu_counter_mask(env);
1646    env->cp15.c9_pmcnten &= ~value;
1647}
1648
1649static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1650                         uint64_t value)
1651{
1652    value &= pmu_counter_mask(env);
1653    env->cp15.c9_pmovsr &= ~value;
1654    pmu_update_irq(env);
1655}
1656
1657static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1658                         uint64_t value)
1659{
1660    value &= pmu_counter_mask(env);
1661    env->cp15.c9_pmovsr |= value;
1662    pmu_update_irq(env);
1663}
1664
1665static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1666                             uint64_t value, const uint8_t counter)
1667{
1668    if (counter == 31) {
1669        pmccfiltr_write(env, ri, value);
1670    } else if (counter < pmu_num_counters(env)) {
1671        pmevcntr_op_start(env, counter);
1672
1673        /*
1674         * If this counter's event type is changing, store the current
1675         * underlying count for the new type in c14_pmevcntr_delta[counter] so
1676         * pmevcntr_op_finish has the correct baseline when it converts back to
1677         * a delta.
1678         */
1679        uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
1680            PMXEVTYPER_EVTCOUNT;
1681        uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
1682        if (old_event != new_event) {
1683            uint64_t count = 0;
1684            if (event_supported(new_event)) {
1685                uint16_t event_idx = supported_event_map[new_event];
1686                count = pm_events[event_idx].get_count(env);
1687            }
1688            env->cp15.c14_pmevcntr_delta[counter] = count;
1689        }
1690
1691        env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
1692        pmevcntr_op_finish(env, counter);
1693    }
1694    /* Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
1695     * PMSELR value is equal to or greater than the number of implemented
1696     * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
1697     */
1698}
1699
1700static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
1701                               const uint8_t counter)
1702{
1703    if (counter == 31) {
1704        return env->cp15.pmccfiltr_el0;
1705    } else if (counter < pmu_num_counters(env)) {
1706        return env->cp15.c14_pmevtyper[counter];
1707    } else {
1708      /*
1709       * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
1710       * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
1711       */
1712        return 0;
1713    }
1714}
1715
1716static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1717                              uint64_t value)
1718{
1719    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1720    pmevtyper_write(env, ri, value, counter);
1721}
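/*
 * In the PMEVTYPER<n>/PMEVCNTR<n> encodings, CRm[1:0] carries counter[4:3]
 * and opc2 carries counter[2:0], so ((crm & 3) << 3) | (opc2 & 7) recovers
 * the counter index; e.g. counter 13 has CRm[1:0] == 0b01, opc2 == 0b101.
 */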
1722
1723static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1724                               uint64_t value)
1725{
1726    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1727    env->cp15.c14_pmevtyper[counter] = value;
1728
1729    /*
1730     * pmevtyper_rawwrite is called between a pair of pmu_op_start and
1731     * pmu_op_finish calls when loading saved state for a migration. Because
1732     * we're potentially updating the type of event here, the value written to
1733     * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
1734     * different counter type. Therefore, we need to set this value to the
1735     * current count for the counter type we're writing so that pmu_op_finish
1736     * has the correct count for its calculation.
1737     */
1738    uint16_t event = value & PMXEVTYPER_EVTCOUNT;
1739    if (event_supported(event)) {
1740        uint16_t event_idx = supported_event_map[event];
1741        env->cp15.c14_pmevcntr_delta[counter] =
1742            pm_events[event_idx].get_count(env);
1743    }
1744}
1745
1746static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1747{
1748    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1749    return pmevtyper_read(env, ri, counter);
1750}
1751
1752static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
1753                             uint64_t value)
1754{
1755    pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
1756}
1757
1758static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
1759{
1760    return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
1761}
1762
1763static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1764                             uint64_t value, uint8_t counter)
1765{
1766    if (counter < pmu_num_counters(env)) {
1767        pmevcntr_op_start(env, counter);
1768        env->cp15.c14_pmevcntr[counter] = value;
1769        pmevcntr_op_finish(env, counter);
1770    }
1771    /*
1772     * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1773     * are CONSTRAINED UNPREDICTABLE.
1774     */
1775}
1776
1777static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
1778                              uint8_t counter)
1779{
1780    if (counter < pmu_num_counters(env)) {
1781        uint64_t ret;
1782        pmevcntr_op_start(env, counter);
1783        ret = env->cp15.c14_pmevcntr[counter];
1784        pmevcntr_op_finish(env, counter);
1785        return ret;
1786    } else {
1787      /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
1788       * are CONSTRAINED UNPREDICTABLE. */
1789        return 0;
1790    }
1791}
1792
1793static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
1794                             uint64_t value)
1795{
1796    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1797    pmevcntr_write(env, ri, value, counter);
1798}
1799
1800static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
1801{
1802    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1803    return pmevcntr_read(env, ri, counter);
1804}
1805
1806static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
1807                             uint64_t value)
1808{
1809    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1810    assert(counter < pmu_num_counters(env));
1811    /* raw accesses are bracketed by pmu_op_start()/pmu_op_finish() */
1812    env->cp15.c14_pmevcntr[counter] = value;
1813}
1814
1815static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
1816{
1817    uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
1818    assert(counter < pmu_num_counters(env));
1819    return env->cp15.c14_pmevcntr[counter];
1820}
1821
1822static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1823                             uint64_t value)
1824{
1825    pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
1826}
1827
1828static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1829{
1830    return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
1831}
1832
1833static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1834                            uint64_t value)
1835{
1836    if (arm_feature(env, ARM_FEATURE_V8)) {
1837        env->cp15.c9_pmuserenr = value & 0xf;
1838    } else {
1839        env->cp15.c9_pmuserenr = value & 1;
1840    }
1841}
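/*
 * ARMv8 defines PMUSERENR.{EN,SW,CR,ER} (bits [3:0]); earlier architecture
 * versions only define the EN bit, hence the narrower mask.
 */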
1842
1843static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
1844                             uint64_t value)
1845{
1846    /* Only bits for implemented counters (and the C bit) can be set */
1847    value &= pmu_counter_mask(env);
1848    env->cp15.c9_pminten |= value;
1849    pmu_update_irq(env);
1850}
1851
1852static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1853                             uint64_t value)
1854{
1855    value &= pmu_counter_mask(env);
1856    env->cp15.c9_pminten &= ~value;
1857    pmu_update_irq(env);
1858}
1859
1860static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
1861                       uint64_t value)
1862{
1863    /* Note that even though the AArch64 view of this register has bits
1864     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
1865     * architectural requirements for bits which are RES0 only in some
1866     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
1867     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
1868     */
1869    raw_write(env, ri, value & ~0x1FULL);
1870}
1871
1872static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
1873{
1874    /* Begin with base v8.0 state.  */
1875    uint32_t valid_mask = 0x3fff;
1876    ARMCPU *cpu = env_archcpu(env);
1877
1878    if (arm_el_is_aa64(env, 3)) {
1879        value |= SCR_FW | SCR_AW;   /* these two bits are RES1.  */
1880        valid_mask &= ~SCR_NET;
1881    } else {
1882        valid_mask &= ~(SCR_RW | SCR_ST);
1883    }
1884
1885    if (!arm_feature(env, ARM_FEATURE_EL2)) {
1886        valid_mask &= ~SCR_HCE;
1887
1888        /* On ARMv7, SMD (or SCD as it is called in v7) is only
1889         * supported if EL2 exists. The bit is UNK/SBZP when
1890         * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
1891         * when EL2 is unavailable.
1892         * On ARMv8, this bit is always available.
1893         */
1894        if (arm_feature(env, ARM_FEATURE_V7) &&
1895            !arm_feature(env, ARM_FEATURE_V8)) {
1896            valid_mask &= ~SCR_SMD;
1897        }
1898    }
1899    if (cpu_isar_feature(aa64_lor, cpu)) {
1900        valid_mask |= SCR_TLOR;
1901    }
1902    if (cpu_isar_feature(aa64_pauth, cpu)) {
1903        valid_mask |= SCR_API | SCR_APK;
1904    }
1905
1906    /* Clear all-context RES0 bits.  */
1907    value &= valid_mask;
1908    raw_write(env, ri, value);
1909}
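/*
 * The 0x3fff base mask covers the SCR bits defined by ARMv8.0: NS, IRQ,
 * FIQ, EA, FW, AW, NET, SMD, HCE, SIF, RW, ST, TWI and TWE (bits [13:0]).
 */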
1910
1911static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1912{
1913    ARMCPU *cpu = env_archcpu(env);
1914
1915    /* Acquire the CSSELR index from the bank corresponding to the
1916     * security state of this CCSIDR access
1917     */
1918    uint32_t index = A32_BANKED_REG_GET(env, csselr,
1919                                        ri->secure & ARM_CP_SECSTATE_S);
1920
1921    return cpu->ccsidr[index];
1922}
1923
1924static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
1925                         uint64_t value)
1926{
1927    raw_write(env, ri, value & 0xf);
1928}
1929
1930static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
1931{
1932    CPUState *cs = env_cpu(env);
1933    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
1934    uint64_t ret = 0;
1935
1936    if (hcr_el2 & HCR_IMO) {
1937        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
1938            ret |= CPSR_I;
1939        }
1940    } else {
1941        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
1942            ret |= CPSR_I;
1943        }
1944    }
1945
1946    if (hcr_el2 & HCR_FMO) {
1947        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
1948            ret |= CPSR_F;
1949        }
1950    } else {
1951        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
1952            ret |= CPSR_F;
1953        }
1954    }
1955
1956    /* External aborts are not possible in QEMU so A bit is always clear */
1957    return ret;
1958}
1959
1960static const ARMCPRegInfo v7_cp_reginfo[] = {
1961    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
1962    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
1963      .access = PL1_W, .type = ARM_CP_NOP },
1964    /* Performance monitors are implementation defined in v7,
1965     * but with an ARM recommended set of registers, which we
1966     * follow.
1967     *
1968     * Performance registers fall into three categories:
1969     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
1970     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
1971     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
1972     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
1973     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
1974     */
1975    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
1976      .access = PL0_RW, .type = ARM_CP_ALIAS,
1977      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1978      .writefn = pmcntenset_write,
1979      .accessfn = pmreg_access,
1980      .raw_writefn = raw_write },
1981    { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
1982      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
1983      .access = PL0_RW, .accessfn = pmreg_access,
1984      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
1985      .writefn = pmcntenset_write, .raw_writefn = raw_write },
1986    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
1987      .access = PL0_RW,
1988      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
1989      .accessfn = pmreg_access,
1990      .writefn = pmcntenclr_write,
1991      .type = ARM_CP_ALIAS },
1992    { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
1993      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
1994      .access = PL0_RW, .accessfn = pmreg_access,
1995      .type = ARM_CP_ALIAS,
1996      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
1997      .writefn = pmcntenclr_write },
1998    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
1999      .access = PL0_RW, .type = ARM_CP_IO,
2000      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2001      .accessfn = pmreg_access,
2002      .writefn = pmovsr_write,
2003      .raw_writefn = raw_write },
2004    { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
2005      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
2006      .access = PL0_RW, .accessfn = pmreg_access,
2007      .type = ARM_CP_ALIAS | ARM_CP_IO,
2008      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2009      .writefn = pmovsr_write,
2010      .raw_writefn = raw_write },
2011    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
2012      .access = PL0_W, .accessfn = pmreg_access_swinc,
2013      .type = ARM_CP_NO_RAW | ARM_CP_IO,
2014      .writefn = pmswinc_write },
2015    { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
2016      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
2017      .access = PL0_W, .accessfn = pmreg_access_swinc,
2018      .type = ARM_CP_NO_RAW | ARM_CP_IO,
2019      .writefn = pmswinc_write },
2020    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
2021      .access = PL0_RW, .type = ARM_CP_ALIAS,
2022      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
2023      .accessfn = pmreg_access_selr, .writefn = pmselr_write,
2024      .raw_writefn = raw_write},
2025    { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
2026      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
2027      .access = PL0_RW, .accessfn = pmreg_access_selr,
2028      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
2029      .writefn = pmselr_write, .raw_writefn = raw_write, },
2030    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
2031      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
2032      .readfn = pmccntr_read, .writefn = pmccntr_write32,
2033      .accessfn = pmreg_access_ccntr },
2034    { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
2035      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
2036      .access = PL0_RW, .accessfn = pmreg_access_ccntr,
2037      .type = ARM_CP_IO,
2038      .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
2039      .readfn = pmccntr_read, .writefn = pmccntr_write,
2040      .raw_readfn = raw_read, .raw_writefn = raw_write, },
2041    { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
2042      .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
2043      .access = PL0_RW, .accessfn = pmreg_access,
2044      .type = ARM_CP_ALIAS | ARM_CP_IO,
2045      .resetvalue = 0, },
2046    { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
2047      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
2048      .writefn = pmccfiltr_write, .raw_writefn = raw_write,
2049      .access = PL0_RW, .accessfn = pmreg_access,
2050      .type = ARM_CP_IO,
2051      .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
2052      .resetvalue = 0, },
2053    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
2054      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2055      .accessfn = pmreg_access,
2056      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2057    { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
2058      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
2059      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2060      .accessfn = pmreg_access,
2061      .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
2062    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
2063      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2064      .accessfn = pmreg_access_xevcntr,
2065      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2066    { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
2067      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
2068      .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2069      .accessfn = pmreg_access_xevcntr,
2070      .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
2071    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
2072      .access = PL0_R | PL1_RW, .accessfn = access_tpm,
2073      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
2074      .resetvalue = 0,
2075      .writefn = pmuserenr_write, .raw_writefn = raw_write },
2076    { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
2077      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
2078      .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
2079      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
2080      .resetvalue = 0,
2081      .writefn = pmuserenr_write, .raw_writefn = raw_write },
2082    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
2083      .access = PL1_RW, .accessfn = access_tpm,
2084      .type = ARM_CP_ALIAS | ARM_CP_IO,
2085      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
2086      .resetvalue = 0,
2087      .writefn = pmintenset_write, .raw_writefn = raw_write },
2088    { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
2089      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
2090      .access = PL1_RW, .accessfn = access_tpm,
2091      .type = ARM_CP_IO,
2092      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2093      .writefn = pmintenset_write, .raw_writefn = raw_write,
2094      .resetvalue = 0x0 },
2095    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
2096      .access = PL1_RW, .accessfn = access_tpm,
2097      .type = ARM_CP_ALIAS | ARM_CP_IO,
2098      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2099      .writefn = pmintenclr_write, },
2100    { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
2101      .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
2102      .access = PL1_RW, .accessfn = access_tpm,
2103      .type = ARM_CP_ALIAS | ARM_CP_IO,
2104      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
2105      .writefn = pmintenclr_write },
2106    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
2107      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
2108      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
2109    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
2110      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
2111      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
2112      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
2113                             offsetof(CPUARMState, cp15.csselr_ns) } },
2114    /* Auxiliary ID register: this actually has an IMPDEF value but for now
2115     * just RAZ for all cores:
2116     */
2117    { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
2118      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
2119      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
2120    /* Auxiliary fault status registers: these also are IMPDEF, and we
2121     * choose to RAZ/WI for all cores.
2122     */
2123    { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
2124      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
2125      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2126    { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
2127      .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
2128      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
2129    /* MAIR can just read-as-written because we don't implement caches
2130     * and so don't need to care about memory attributes.
2131     */
2132    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
2133      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
2134      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
2135      .resetvalue = 0 },
2136    { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
2137      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
2138      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
2139      .resetvalue = 0 },
2140    /* For non-long-descriptor page tables these are PRRR and NMRR;
2141     * regardless they still act as reads-as-written for QEMU.
2142     */
2143     /* MAIR0/1 are defined separately from their 64-bit counterpart which
2144      * allows them to assign the correct fieldoffset based on the endianness
2145      * handled in the field definitions.
2146      */
2147    { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
2148      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
2149      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
2150                             offsetof(CPUARMState, cp15.mair0_ns) },
2151      .resetfn = arm_cp_reset_ignore },
2152    { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
2153      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
2154      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
2155                             offsetof(CPUARMState, cp15.mair1_ns) },
2156      .resetfn = arm_cp_reset_ignore },
2157    { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
2158      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
2159      .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
2160    /* 32 bit ITLB invalidates */
2161    { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
2162      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2163    { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
2164      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2165    { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
2166      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2167    /* 32 bit DTLB invalidates */
2168    { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
2169      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2170    { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
2171      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2172    { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
2173      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2174    /* 32 bit TLB invalidates */
2175    { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
2176      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
2177    { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
2178      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
2179    { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
2180      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
2181    { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
2182      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
2183    REGINFO_SENTINEL
2184};
2185
2186static const ARMCPRegInfo v7mp_cp_reginfo[] = {
2187    /* 32 bit TLB invalidates, Inner Shareable */
2188    { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
2189      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
2190    { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
2191      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
2192    { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
2193      .type = ARM_CP_NO_RAW, .access = PL1_W,
2194      .writefn = tlbiasid_is_write },
2195    { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
2196      .type = ARM_CP_NO_RAW, .access = PL1_W,
2197      .writefn = tlbimvaa_is_write },
2198    REGINFO_SENTINEL
2199};
2200
2201static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
2202    /* PMOVSSET is not implemented in v7 before v7ve */
2203    { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
2204      .access = PL0_RW, .accessfn = pmreg_access,
2205      .type = ARM_CP_ALIAS | ARM_CP_IO,
2206      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
2207      .writefn = pmovsset_write,
2208      .raw_writefn = raw_write },
2209    { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
2210      .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
2211      .access = PL0_RW, .accessfn = pmreg_access,
2212      .type = ARM_CP_ALIAS | ARM_CP_IO,
2213      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
2214      .writefn = pmovsset_write,
2215      .raw_writefn = raw_write },
2216    REGINFO_SENTINEL
2217};
2218
2219static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
2220                        uint64_t value)
2221{
2222    value &= 1;
2223    env->teecr = value;
2224}
2225
2226static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
2227                                    bool isread)
2228{
2229    if (arm_current_el(env) == 0 && (env->teecr & 1)) {
2230        return CP_ACCESS_TRAP;
2231    }
2232    return CP_ACCESS_OK;
2233}
2234
2235static const ARMCPRegInfo t2ee_cp_reginfo[] = {
2236    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
2237      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
2238      .resetvalue = 0,
2239      .writefn = teecr_write },
2240    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
2241      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
2242      .accessfn = teehbr_access, .resetvalue = 0 },
2243    REGINFO_SENTINEL
2244};
2245
2246static const ARMCPRegInfo v6k_cp_reginfo[] = {
2247    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
2248      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
2249      .access = PL0_RW,
2250      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
2251    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
2252      .access = PL0_RW,
2253      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
2254                             offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
2255      .resetfn = arm_cp_reset_ignore },
2256    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
2257      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
2258      .access = PL0_R|PL1_W,
2259      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
2260      .resetvalue = 0},
2261    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
2262      .access = PL0_R|PL1_W,
2263      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
2264                             offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
2265      .resetfn = arm_cp_reset_ignore },
2266    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
2267      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
2268      .access = PL1_RW,
2269      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
2270    { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
2271      .access = PL1_RW,
2272      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
2273                             offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
2274      .resetvalue = 0 },
2275    REGINFO_SENTINEL
2276};
2277
2278#ifndef CONFIG_USER_ONLY
2279
2280static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
2281                                       bool isread)
2282{
2283    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
2284     * Writable only at the highest implemented exception level.
2285     */
2286    int el = arm_current_el(env);
2287
2288    switch (el) {
2289    case 0:
2290        if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
2291            return CP_ACCESS_TRAP;
2292        }
2293        break;
2294    case 1:
2295        if (!isread && ri->state == ARM_CP_STATE_AA32 &&
2296            arm_is_secure_below_el3(env)) {
2297            /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
2298            return CP_ACCESS_TRAP_UNCATEGORIZED;
2299        }
2300        break;
2301    case 2:
2302    case 3:
2303        break;
2304    }
2305
2306    if (!isread && el < arm_highest_el(env)) {
2307        return CP_ACCESS_TRAP_UNCATEGORIZED;
2308    }
2309
2310    return CP_ACCESS_OK;
2311}
2312
2313static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
2314                                        bool isread)
2315{
2316    unsigned int cur_el = arm_current_el(env);
2317    bool secure = arm_is_secure(env);
2318
2319    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
2320    if (cur_el == 0 &&
2321        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
2322        return CP_ACCESS_TRAP;
2323    }
2324
2325    if (arm_feature(env, ARM_FEATURE_EL2) &&
2326        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
2327        !extract32(env->cp15.cnthctl_el2, 0, 1)) {
2328        return CP_ACCESS_TRAP_EL2;
2329    }
2330    return CP_ACCESS_OK;
2331}
2332
2333static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
2334                                      bool isread)
2335{
2336    unsigned int cur_el = arm_current_el(env);
2337    bool secure = arm_is_secure(env);
2338
2339    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
2340     * EL0[PV]TEN is zero.
2341     */
2342    if (cur_el == 0 &&
2343        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
2344        return CP_ACCESS_TRAP;
2345    }
2346
2347    if (arm_feature(env, ARM_FEATURE_EL2) &&
2348        timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
2349        !extract32(env->cp15.cnthctl_el2, 1, 1)) {
2350        return CP_ACCESS_TRAP_EL2;
2351    }
2352    return CP_ACCESS_OK;
2353}
2354
2355static CPAccessResult gt_pct_access(CPUARMState *env,
2356                                    const ARMCPRegInfo *ri,
2357                                    bool isread)
2358{
2359    return gt_counter_access(env, GTIMER_PHYS, isread);
2360}
2361
2362static CPAccessResult gt_vct_access(CPUARMState *env,
2363                                    const ARMCPRegInfo *ri,
2364                                    bool isread)
2365{
2366    return gt_counter_access(env, GTIMER_VIRT, isread);
2367}
2368
2369static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2370                                       bool isread)
2371{
2372    return gt_timer_access(env, GTIMER_PHYS, isread);
2373}
2374
2375static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
2376                                       bool isread)
2377{
2378    return gt_timer_access(env, GTIMER_VIRT, isread);
2379}
2380
2381static CPAccessResult gt_stimer_access(CPUARMState *env,
2382                                       const ARMCPRegInfo *ri,
2383                                       bool isread)
2384{
2385    /* The AArch64 register view of the secure physical timer is
2386     * always accessible from EL3, and configurably accessible from
2387     * Secure EL1.
2388     */
2389    switch (arm_current_el(env)) {
2390    case 1:
2391        if (!arm_is_secure(env)) {
2392            return CP_ACCESS_TRAP;
2393        }
2394        if (!(env->cp15.scr_el3 & SCR_ST)) {
2395            return CP_ACCESS_TRAP_EL3;
2396        }
2397        return CP_ACCESS_OK;
2398    case 0:
2399    case 2:
2400        return CP_ACCESS_TRAP;
2401    case 3:
2402        return CP_ACCESS_OK;
2403    default:
2404        g_assert_not_reached();
2405    }
2406}
2407
2408static uint64_t gt_get_countervalue(CPUARMState *env)
2409{
2410    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
2411}
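/*
 * GTIMER_SCALE (defined elsewhere in the target/arm code) is the number of
 * nanoseconds per counter tick; the CNTFRQ_EL0 reset value below is derived
 * from the same constant, so the advertised frequency and this counter stay
 * consistent.
 */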
2412
2413static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
2414{
2415    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
2416
2417    if (gt->ctl & 1) {
2418        /* Timer enabled: calculate and set current ISTATUS, irq, and
2419         * reset timer to when ISTATUS next has to change
2420         */
2421        uint64_t offset = timeridx == GTIMER_VIRT ?
2422                                      cpu->env.cp15.cntvoff_el2 : 0;
2423        uint64_t count = gt_get_countervalue(&cpu->env);
2424        /* Note that this must be unsigned 64 bit arithmetic: */
2425        int istatus = count - offset >= gt->cval;
2426        uint64_t nexttick;
2427        int irqstate;
2428
2429        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
2430
2431        irqstate = (istatus && !(gt->ctl & 2));
2432        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2433
2434        if (istatus) {
2435            /* Next transition is when count rolls back over to zero */
2436            nexttick = UINT64_MAX;
2437        } else {
2438            /* Next transition is when we hit cval */
2439            nexttick = gt->cval + offset;
2440        }
2441        /* Note that the desired next expiry time might be beyond the
2442         * signed-64-bit range of a QEMUTimer -- in this case we just
2443         * set the timer for as far in the future as possible. When the
2444         * timer expires we will reset the timer for any remaining period.
2445         */
2446        if (nexttick > INT64_MAX / GTIMER_SCALE) {
2447            nexttick = INT64_MAX / GTIMER_SCALE;
2448        }
2449        timer_mod(cpu->gt_timer[timeridx], nexttick);
2450        trace_arm_gt_recalc(timeridx, irqstate, nexttick);
2451    } else {
2452        /* Timer disabled: ISTATUS and timer output always clear */
2453        gt->ctl &= ~4;
2454        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
2455        timer_del(cpu->gt_timer[timeridx]);
2456        trace_arm_gt_recalc_disabled(timeridx);
2457    }
2458}
2459
2460static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
2461                           int timeridx)
2462{
2463    ARMCPU *cpu = env_archcpu(env);
2464
2465    timer_del(cpu->gt_timer[timeridx]);
2466}
2467
2468static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2469{
2470    return gt_get_countervalue(env);
2471}
2472
2473static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2474{
2475    return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
2476}
2477
2478static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2479                          int timeridx,
2480                          uint64_t value)
2481{
2482    trace_arm_gt_cval_write(timeridx, value);
2483    env->cp15.c14_timer[timeridx].cval = value;
2484    gt_recalc_timer(env_archcpu(env), timeridx);
2485}
2486
2487static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
2488                             int timeridx)
2489{
2490    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
2491
2492    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
2493                      (gt_get_countervalue(env) - offset));
2494}
2495
2496static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2497                          int timeridx,
2498                          uint64_t value)
2499{
2500    uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
2501
2502    trace_arm_gt_tval_write(timeridx, value);
2503    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
2504                                         sextract64(value, 0, 32);
2505    gt_recalc_timer(env_archcpu(env), timeridx);
2506}
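/*
 * TVAL is the signed 32-bit downcounter view of the timer: reads return
 * CVAL - (counter - offset), and writes set CVAL = (counter - offset) + TVAL.
 * E.g. with an adjusted counter of 1000, CVAL == 1500 reads back as
 * TVAL == 500, and writing TVAL = 500 stores 1500 into CVAL.
 */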
2507
2508static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2509                         int timeridx,
2510                         uint64_t value)
2511{
2512    ARMCPU *cpu = env_archcpu(env);
2513    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
2514
2515    trace_arm_gt_ctl_write(timeridx, value);
2516    env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
2517    if ((oldval ^ value) & 1) {
2518        /* Enable toggled */
2519        gt_recalc_timer(cpu, timeridx);
2520    } else if ((oldval ^ value) & 2) {
2521        /* IMASK toggled: don't need to recalculate,
2522         * just set the interrupt line based on ISTATUS
2523         */
2524        int irqstate = (oldval & 4) && !(value & 2);
2525
2526        trace_arm_gt_imask_toggle(timeridx, irqstate);
2527        qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
2528    }
2529}
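/*
 * CNT*_CTL is ENABLE (bit 0), IMASK (bit 1) and the read-only ISTATUS
 * (bit 2); deposit64(oldval, 0, 2, value) updates only ENABLE and IMASK, so
 * a guest write cannot clobber ISTATUS.
 */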
2530
2531static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2532{
2533    gt_timer_reset(env, ri, GTIMER_PHYS);
2534}
2535
2536static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2537                               uint64_t value)
2538{
2539    gt_cval_write(env, ri, GTIMER_PHYS, value);
2540}
2541
2542static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2543{
2544    return gt_tval_read(env, ri, GTIMER_PHYS);
2545}
2546
2547static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2548                               uint64_t value)
2549{
2550    gt_tval_write(env, ri, GTIMER_PHYS, value);
2551}
2552
2553static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2554                              uint64_t value)
2555{
2556    gt_ctl_write(env, ri, GTIMER_PHYS, value);
2557}
2558
2559static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2560{
2561    gt_timer_reset(env, ri, GTIMER_VIRT);
2562}
2563
2564static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2565                               uint64_t value)
2566{
2567    gt_cval_write(env, ri, GTIMER_VIRT, value);
2568}
2569
2570static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2571{
2572    return gt_tval_read(env, ri, GTIMER_VIRT);
2573}
2574
2575static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2576                               uint64_t value)
2577{
2578    gt_tval_write(env, ri, GTIMER_VIRT, value);
2579}
2580
2581static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2582                              uint64_t value)
2583{
2584    gt_ctl_write(env, ri, GTIMER_VIRT, value);
2585}
2586
2587static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
2588                              uint64_t value)
2589{
2590    ARMCPU *cpu = env_archcpu(env);
2591
2592    trace_arm_gt_cntvoff_write(value);
2593    raw_write(env, ri, value);
2594    gt_recalc_timer(cpu, GTIMER_VIRT);
2595}
2596
2597static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2598{
2599    gt_timer_reset(env, ri, GTIMER_HYP);
2600}
2601
2602static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2603                              uint64_t value)
2604{
2605    gt_cval_write(env, ri, GTIMER_HYP, value);
2606}
2607
2608static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2609{
2610    return gt_tval_read(env, ri, GTIMER_HYP);
2611}
2612
2613static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2614                              uint64_t value)
2615{
2616    gt_tval_write(env, ri, GTIMER_HYP, value);
2617}
2618
2619static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2620                              uint64_t value)
2621{
2622    gt_ctl_write(env, ri, GTIMER_HYP, value);
2623}
2624
2625static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
2626{
2627    gt_timer_reset(env, ri, GTIMER_SEC);
2628}
2629
2630static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2631                              uint64_t value)
2632{
2633    gt_cval_write(env, ri, GTIMER_SEC, value);
2634}
2635
2636static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
2637{
2638    return gt_tval_read(env, ri, GTIMER_SEC);
2639}
2640
2641static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
2642                              uint64_t value)
2643{
2644    gt_tval_write(env, ri, GTIMER_SEC, value);
2645}
2646
2647static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
2648                              uint64_t value)
2649{
2650    gt_ctl_write(env, ri, GTIMER_SEC, value);
2651}
2652
2653void arm_gt_ptimer_cb(void *opaque)
2654{
2655    ARMCPU *cpu = opaque;
2656
2657    gt_recalc_timer(cpu, GTIMER_PHYS);
2658}
2659
2660void arm_gt_vtimer_cb(void *opaque)
2661{
2662    ARMCPU *cpu = opaque;
2663
2664    gt_recalc_timer(cpu, GTIMER_VIRT);
2665}
2666
2667void arm_gt_htimer_cb(void *opaque)
2668{
2669    ARMCPU *cpu = opaque;
2670
2671    gt_recalc_timer(cpu, GTIMER_HYP);
2672}
2673
2674void arm_gt_stimer_cb(void *opaque)
2675{
2676    ARMCPU *cpu = opaque;
2677
2678    gt_recalc_timer(cpu, GTIMER_SEC);
2679}
2680
2681static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2682    /* Note that CNTFRQ is purely reads-as-written for the benefit
2683     * of software; writing it doesn't actually change the timer frequency.
2684     * Our reset value matches the fixed frequency we implement the timer at.
2685     */
2686    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
2687      .type = ARM_CP_ALIAS,
2688      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2689      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
2690    },
2691    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2692      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2693      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
2694      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2695      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
2696    },
2697    /* overall control: mostly access permissions */
2698    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
2699      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
2700      .access = PL1_RW,
2701      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
2702      .resetvalue = 0,
2703    },
2704    /* per-timer control */
2705    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2706      .secure = ARM_CP_SECSTATE_NS,
2707      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2708      .accessfn = gt_ptimer_access,
2709      .fieldoffset = offsetoflow32(CPUARMState,
2710                                   cp15.c14_timer[GTIMER_PHYS].ctl),
2711      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
2712    },
2713    { .name = "CNTP_CTL_S",
2714      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
2715      .secure = ARM_CP_SECSTATE_S,
2716      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2717      .accessfn = gt_ptimer_access,
2718      .fieldoffset = offsetoflow32(CPUARMState,
2719                                   cp15.c14_timer[GTIMER_SEC].ctl),
2720      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2721    },
2722    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
2723      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
2724      .type = ARM_CP_IO, .access = PL0_RW,
2725      .accessfn = gt_ptimer_access,
2726      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
2727      .resetvalue = 0,
2728      .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
2729    },
2730    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
2731      .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW,
2732      .accessfn = gt_vtimer_access,
2733      .fieldoffset = offsetoflow32(CPUARMState,
2734                                   cp15.c14_timer[GTIMER_VIRT].ctl),
2735      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
2736    },
2737    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
2738      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
2739      .type = ARM_CP_IO, .access = PL0_RW,
2740      .accessfn = gt_vtimer_access,
2741      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
2742      .resetvalue = 0,
2743      .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
2744    },
2745    /* TimerValue views: a 32 bit downcounting view of the underlying state */
2746    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2747      .secure = ARM_CP_SECSTATE_NS,
2748      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2749      .accessfn = gt_ptimer_access,
2750      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2751    },
2752    { .name = "CNTP_TVAL_S",
2753      .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
2754      .secure = ARM_CP_SECSTATE_S,
2755      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2756      .accessfn = gt_ptimer_access,
2757      .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
2758    },
2759    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2760      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
2761      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2762      .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
2763      .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
2764    },
2765    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
2766      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2767      .accessfn = gt_vtimer_access,
2768      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2769    },
2770    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
2771      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
2772      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW,
2773      .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
2774      .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
2775    },
2776    /* The counter itself */
2777    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
2778      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2779      .accessfn = gt_pct_access,
2780      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
2781    },
2782    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
2783      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
2784      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2785      .accessfn = gt_pct_access, .readfn = gt_cnt_read,
2786    },
2787    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
2788      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
2789      .accessfn = gt_vct_access,
2790      .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
2791    },
2792    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2793      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2794      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2795      .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
2796    },
2797    /* Comparison value, indicating when the timer goes off */
2798    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
2799      .secure = ARM_CP_SECSTATE_NS,
2800      .access = PL0_RW,
2801      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2802      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2803      .accessfn = gt_ptimer_access,
2804      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2805    },
2806    { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2,
2807      .secure = ARM_CP_SECSTATE_S,
2808      .access = PL0_RW,
2809      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2810      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2811      .accessfn = gt_ptimer_access,
2812      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2813    },
2814    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2815      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
2816      .access = PL0_RW,
2817      .type = ARM_CP_IO,
2818      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
2819      .resetvalue = 0, .accessfn = gt_ptimer_access,
2820      .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
2821    },
2822    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
2823      .access = PL0_RW,
2824      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
2825      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2826      .accessfn = gt_vtimer_access,
2827      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2828    },
2829    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
2830      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
2831      .access = PL0_RW,
2832      .type = ARM_CP_IO,
2833      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
2834      .resetvalue = 0, .accessfn = gt_vtimer_access,
2835      .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
2836    },
2837    /* Secure timer -- this is actually restricted to only EL3
2838     * and configurably Secure-EL1 via the accessfn.
2839     */
2840    { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
2841      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
2842      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
2843      .accessfn = gt_stimer_access,
2844      .readfn = gt_sec_tval_read,
2845      .writefn = gt_sec_tval_write,
2846      .resetfn = gt_sec_timer_reset,
2847    },
2848    { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
2849      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
2850      .type = ARM_CP_IO, .access = PL1_RW,
2851      .accessfn = gt_stimer_access,
2852      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
2853      .resetvalue = 0,
2854      .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
2855    },
2856    { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
2857      .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
2858      .type = ARM_CP_IO, .access = PL1_RW,
2859      .accessfn = gt_stimer_access,
2860      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
2861      .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
2862    },
2863    REGINFO_SENTINEL
2864};
2865
2866#else
2867
2868/* In user-mode most of the generic timer registers are inaccessible;
2869 * however, modern kernels (4.12+) allow access to cntvct_el0
2870 */
2871
2872static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
2873{
2874    /* Currently we have no support for QEMUTimer in linux-user, so we
2875     * can't call gt_get_countervalue(env); instead we directly
2876     * call the lower level functions.
2877     */
2878    return cpu_get_clock() / GTIMER_SCALE;
2879}
2880
2881static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
2882    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
2883      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
2884      .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */,
2885      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
2886      .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE,
2887    },
2888    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
2889      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
2890      .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
2891      .readfn = gt_virt_cnt_read,
2892    },
2893    REGINFO_SENTINEL
2894};
2895
2896#endif
2897
2898static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
2899{
2900    if (arm_feature(env, ARM_FEATURE_LPAE)) {
2901        raw_write(env, ri, value);
2902    } else if (arm_feature(env, ARM_FEATURE_V7)) {
2903        raw_write(env, ri, value & 0xfffff6ff);
2904    } else {
2905        raw_write(env, ri, value & 0xfffff1ff);
2906    }
2907}
2908
2909#ifndef CONFIG_USER_ONLY
2910/* get_phys_addr() isn't present for user-mode-only targets */
2911
2912static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
2913                                 bool isread)
2914{
2915    if (ri->opc2 & 4) {
2916        /* The ATS12NSO* operations must trap to EL3 if executed in
2917         * Secure EL1 (which can only happen if EL3 is AArch64).
2918         * They are simply UNDEF if executed from NS EL1.
2919         * They function normally from EL2 or EL3.
2920         */
2921        if (arm_current_el(env) == 1) {
2922            if (arm_is_secure_below_el3(env)) {
2923                return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
2924            }
2925            return CP_ACCESS_TRAP_UNCATEGORIZED;
2926        }
2927    }
2928    return CP_ACCESS_OK;
2929}
2930
2931static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
2932                             MMUAccessType access_type, ARMMMUIdx mmu_idx)
2933{
2934    hwaddr phys_addr;
2935    target_ulong page_size;
2936    int prot;
2937    bool ret;
2938    uint64_t par64;
2939    bool format64 = false;
2940    MemTxAttrs attrs = {};
2941    ARMMMUFaultInfo fi = {};
2942    ARMCacheAttrs cacheattrs = {};
2943
2944    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
2945                        &prot, &page_size, &fi, &cacheattrs);
2946
2947    if (is_a64(env)) {
2948        format64 = true;
2949    } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
2950        /*
2951         * ATS1Cxx:
2952         * * TTBCR.EAE determines whether the result is returned using the
2953         *   32-bit or the 64-bit PAR format
2954         * * Instructions executed in Hyp mode always use the 64-bit format
2955         *
2956         * ATS1S2NSOxx uses the 64-bit format if any of the following is true:
2957         * * The Non-secure TTBCR.EAE bit is set to 1
2958         * * The implementation includes EL2, and the value of HCR.VM is 1
2959         *
2960         * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
2961         *
2962         * ATS1Hx always uses the 64-bit format.
2963         */
2964        format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
2965
2966        if (arm_feature(env, ARM_FEATURE_EL2)) {
2967            if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
2968                format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
2969            } else {
2970                format64 |= arm_current_el(env) == 2;
2971            }
2972        }
2973    }
2974
2975    if (format64) {
2976        /* Create a 64-bit PAR */
2977        par64 = (1 << 11); /* LPAE bit always set */
2978        if (!ret) {
2979            par64 |= phys_addr & ~0xfffULL;
2980            if (!attrs.secure) {
2981                par64 |= (1 << 9); /* NS */
2982            }
2983            par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
2984            par64 |= cacheattrs.shareability << 7; /* SH */
2985        } else {
2986            uint32_t fsr = arm_fi_to_lfsc(&fi);
2987
2988            par64 |= 1; /* F */
2989            par64 |= (fsr & 0x3f) << 1; /* FS */
2990            if (fi.stage2) {
2991                par64 |= (1 << 9); /* S */
2992            }
2993            if (fi.s1ptw) {
2994                par64 |= (1 << 8); /* PTW */
2995            }
2996        }
2997    } else {
2998        /* fsr is a DFSR/IFSR value for the short descriptor
2999         * translation table format (with WnR always clear).
3000         * Convert it to a 32-bit PAR.
3001         */
3002        if (!ret) {
3003            /* We do not set any attribute bits in the PAR */
3004            if (page_size == (1 << 24)
3005                && arm_feature(env, ARM_FEATURE_V7)) {
3006                par64 = (phys_addr & 0xff000000) | (1 << 1);
3007            } else {
3008                par64 = phys_addr & 0xfffff000;
3009            }
3010            if (!attrs.secure) {
3011                par64 |= (1 << 9); /* NS */
3012            }
3013        } else {
3014            uint32_t fsr = arm_fi_to_sfsc(&fi);
3015
3016            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
3017                    ((fsr & 0xf) << 1) | 1;
3018        }
3019    }
3020    return par64;
3021}
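
/*
 * For reference, the 64-bit PAR value assembled above looks like:
 *   on success: physical address (low 12 bits clear), ATTR in [63:56],
 *               NS in bit 9, SH in [8:7], bit 11 always set;
 *   on failure: F (bit 0) set, long-format fault status in [6:1],
 *               S (stage 2 fault) in bit 9, PTW in bit 8, bit 11 set.
 * The 32-bit result carries only the physical address, the NS bit and a
 * short-format fault status.
 */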
3022
3023static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
3024{
3025    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3026    uint64_t par64;
3027    ARMMMUIdx mmu_idx;
3028    int el = arm_current_el(env);
3029    bool secure = arm_is_secure_below_el3(env);
3030
3031    switch (ri->opc2 & 6) {
3032    case 0:
3033        /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
3034        switch (el) {
3035        case 3:
3036            mmu_idx = ARMMMUIdx_S1E3;
3037            break;
3038        case 2:
3039            mmu_idx = ARMMMUIdx_S1NSE1;
3040            break;
3041        case 1:
3042            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
3043            break;
3044        default:
3045            g_assert_not_reached();
3046        }
3047        break;
3048    case 2:
3049        /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
3050        switch (el) {
3051        case 3:
3052            mmu_idx = ARMMMUIdx_S1SE0;
3053            break;
3054        case 2:
3055            mmu_idx = ARMMMUIdx_S1NSE0;
3056            break;
3057        case 1:
3058            mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
3059            break;
3060        default:
3061            g_assert_not_reached();
3062        }
3063        break;
3064    case 4:
3065        /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
3066        mmu_idx = ARMMMUIdx_S12NSE1;
3067        break;
3068    case 6:
3069        /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
3070        mmu_idx = ARMMMUIdx_S12NSE0;
3071        break;
3072    default:
3073        g_assert_not_reached();
3074    }
3075
3076    par64 = do_ats_write(env, value, access_type, mmu_idx);
3077
3078    A32_BANKED_CURRENT_REG_SET(env, par, par64);
3079}
3080
3081static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
3082                        uint64_t value)
3083{
3084    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3085    uint64_t par64;
3086
3087    par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S1E2);
3088
3089    A32_BANKED_CURRENT_REG_SET(env, par, par64);
3090}
3091
3092static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
3093                                     bool isread)
3094{
3095    if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
3096        return CP_ACCESS_TRAP;
3097    }
3098    return CP_ACCESS_OK;
3099}
3100
3101static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
3102                        uint64_t value)
3103{
3104    MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
3105    ARMMMUIdx mmu_idx;
3106    int secure = arm_is_secure_below_el3(env);
3107
3108    switch (ri->opc2 & 6) {
3109    case 0:
3110        switch (ri->opc1) {
3111        case 0: /* AT S1E1R, AT S1E1W */
3112            mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
3113            break;
3114        case 4: /* AT S1E2R, AT S1E2W */
3115            mmu_idx = ARMMMUIdx_S1E2;
3116            break;
3117        case 6: /* AT S1E3R, AT S1E3W */
3118            mmu_idx = ARMMMUIdx_S1E3;
3119            break;
3120        default:
3121            g_assert_not_reached();
3122        }
3123        break;
3124    case 2: /* AT S1E0R, AT S1E0W */
3125        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
3126        break;
3127    case 4: /* AT S12E1R, AT S12E1W */
3128        mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
3129        break;
3130    case 6: /* AT S12E0R, AT S12E0W */
3131        mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
3132        break;
3133    default:
3134        g_assert_not_reached();
3135    }
3136
3137    env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
3138}
3139#endif
3140
3141static const ARMCPRegInfo vapa_cp_reginfo[] = {
3142    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
3143      .access = PL1_RW, .resetvalue = 0,
3144      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
3145                             offsetoflow32(CPUARMState, cp15.par_ns) },
3146      .writefn = par_write },
3147#ifndef CONFIG_USER_ONLY
3148    /* This underdecoding is safe because the reginfo is NO_RAW. */
3149    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
3150      .access = PL1_W, .accessfn = ats_access,
3151      .writefn = ats_write, .type = ARM_CP_NO_RAW },
3152#endif
3153    REGINFO_SENTINEL
3154};
3155
3156/* Return basic MPU access permission bits.  */
3157static uint32_t simple_mpu_ap_bits(uint32_t val)
3158{
3159    uint32_t ret;
3160    uint32_t mask;
3161    int i;
3162    ret = 0;
3163    mask = 3;
3164    for (i = 0; i < 16; i += 2) {
3165        ret |= (val >> i) & mask;
3166        mask <<= 2;
3167    }
3168    return ret;
3169}
3170
3171/* Pad basic MPU access permission bits to extended format.  */
3172static uint32_t extended_mpu_ap_bits(uint32_t val)
3173{
3174    uint32_t ret;
3175    uint32_t mask;
3176    int i;
3177    ret = 0;
3178    mask = 3;
3179    for (i = 0; i < 16; i += 2) {
3180        ret |= (val & mask) << i;
3181        mask <<= 2;
3182    }
3183    return ret;
3184}
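
/*
 * Worked example of the two conversions above: the "simple" view packs one
 * 2-bit AP field per region into bits [15:0], while the extended view gives
 * each region a 4-bit slot. So extended_mpu_ap_bits(0x6) (region 0 AP = 2,
 * region 1 AP = 1) yields 0x12, and simple_mpu_ap_bits(0x12) folds it back
 * to 0x6.
 */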
3185
3186static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3187                                 uint64_t value)
3188{
3189    env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
3190}
3191
3192static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3193{
3194    return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
3195}
3196
3197static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
3198                                 uint64_t value)
3199{
3200    env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
3201}
3202
3203static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
3204{
3205    return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
3206}
3207
3208static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
3209{
3210    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3211
3212    if (!u32p) {
3213        return 0;
3214    }
3215
3216    u32p += env->pmsav7.rnr[M_REG_NS];
3217    return *u32p;
3218}
3219
3220static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
3221                         uint64_t value)
3222{
3223    ARMCPU *cpu = env_archcpu(env);
3224    uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
3225
3226    if (!u32p) {
3227        return;
3228    }
3229
3230    u32p += env->pmsav7.rnr[M_REG_NS];
3231    tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
3232    *u32p = value;
3233}
3234
3235static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3236                              uint64_t value)
3237{
3238    ARMCPU *cpu = env_archcpu(env);
3239    uint32_t nrgs = cpu->pmsav7_dregion;
3240
3241    if (value >= nrgs) {
3242        qemu_log_mask(LOG_GUEST_ERROR,
3243                      "PMSAv7 RGNR write >= # supported regions, %" PRIu32
3244                      " >= %" PRIu32 "\n", (uint32_t)value, nrgs);
3245        return;
3246    }
3247
3248    raw_write(env, ri, value);
3249}
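
/*
 * Guest usage sketch: a PMSAv7 region is programmed by selecting it via
 * RGNR and then writing DRBAR/DRSR/DRACR, which pmsav7_read()/pmsav7_write()
 * above redirect to element rnr[M_REG_NS] of the corresponding array:
 *
 *     mcr p15, 0, r0, c6, c2, 0   @ RGNR: select region r0
 *     mcr p15, 0, r1, c6, c1, 0   @ DRBAR for that region
 *     mcr p15, 0, r2, c6, c1, 2   @ DRSR for that region
 */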
3250
3251static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
3252    /* Reset for all these registers is handled in arm_cpu_reset(),
3253     * because the PMSAv7 is also used by M-profile CPUs, which do
3254     * not register cpregs but still need the state to be reset.
3255     */
3256    { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
3257      .access = PL1_RW, .type = ARM_CP_NO_RAW,
3258      .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
3259      .readfn = pmsav7_read, .writefn = pmsav7_write,
3260      .resetfn = arm_cp_reset_ignore },
3261    { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
3262      .access = PL1_RW, .type = ARM_CP_NO_RAW,
3263      .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
3264      .readfn = pmsav7_read, .writefn = pmsav7_write,
3265      .resetfn = arm_cp_reset_ignore },
3266    { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
3267      .access = PL1_RW, .type = ARM_CP_NO_RAW,
3268      .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
3269      .readfn = pmsav7_read, .writefn = pmsav7_write,
3270      .resetfn = arm_cp_reset_ignore },
3271    { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
3272      .access = PL1_RW,
3273      .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
3274      .writefn = pmsav7_rgnr_write,
3275      .resetfn = arm_cp_reset_ignore },
3276    REGINFO_SENTINEL
3277};
3278
3279static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
3280    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3281      .access = PL1_RW, .type = ARM_CP_ALIAS,
3282      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3283      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
3284    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3285      .access = PL1_RW, .type = ARM_CP_ALIAS,
3286      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3287      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
3288    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
3289      .access = PL1_RW,
3290      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
3291      .resetvalue = 0, },
3292    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
3293      .access = PL1_RW,
3294      .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
3295      .resetvalue = 0, },
3296    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
3297      .access = PL1_RW,
3298      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
3299    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
3300      .access = PL1_RW,
3301      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
3302    /* Protection region base and size registers */
3303    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
3304      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3305      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
3306    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
3307      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3308      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
3309    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
3310      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3311      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
3312    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
3313      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3314      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
3315    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
3316      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3317      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
3318    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
3319      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3320      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
3321    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
3322      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3323      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
3324    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
3325      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
3326      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
3327    REGINFO_SENTINEL
3328};
3329
3330static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
3331                                 uint64_t value)
3332{
3333    TCR *tcr = raw_ptr(env, ri);
3334    int maskshift = extract32(value, 0, 3);
3335
3336    if (!arm_feature(env, ARM_FEATURE_V8)) {
3337        if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
3338            /* Pre-ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when
3339             * using Long-descriptor translation table format */
3340            value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
3341        } else if (arm_feature(env, ARM_FEATURE_EL3)) {
3342            /* In an implementation that includes the Security Extensions
3343             * TTBCR has additional fields PD0 [4] and PD1 [5] for
3344             * Short-descriptor translation table format.
3345             */
3346            value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
3347        } else {
3348            value &= TTBCR_N;
3349        }
3350    }
3351
3352    /* Update the masks corresponding to the TCR bank being written.
3353     * Note that we always calculate mask and base_mask, but
3354     * they are only used for short-descriptor tables (i.e. if EAE is 0);
3355     * for long-descriptor tables the TCR fields are used differently
3356     * and the mask and base_mask values are meaningless.
3357     */
3358    tcr->raw_tcr = value;
3359    tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
3360    tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
3361}
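
/*
 * Example of the short-descriptor mask computation above: with TTBCR.N = 2
 * (maskshift == 2), mask becomes 0xc0000000 and base_mask 0xfffff000, i.e.
 * addresses with either of the top two VA bits set are translated via TTBR1
 * and the TTBR0 table shrinks to 4KB with 4KB alignment.
 */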
3362
3363static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3364                             uint64_t value)
3365{
3366    ARMCPU *cpu = env_archcpu(env);
3367    TCR *tcr = raw_ptr(env, ri);
3368
3369    if (arm_feature(env, ARM_FEATURE_LPAE)) {
3370        /* With LPAE the TTBCR could result in a change of ASID
3371         * via the TTBCR.A1 bit, so do a TLB flush.
3372         */
3373        tlb_flush(CPU(cpu));
3374    }
3375    /* Preserve the high half of TCR_EL1, set via TTBCR2.  */
3376    value = deposit64(tcr->raw_tcr, 0, 32, value);
3377    vmsa_ttbcr_raw_write(env, ri, value);
3378}
3379
3380static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
3381{
3382    TCR *tcr = raw_ptr(env, ri);
3383
3384    /* Reset both the TCR and the masks corresponding to the bank of
3385     * the TCR being reset.
3386     */
3387    tcr->raw_tcr = 0;
3388    tcr->mask = 0;
3389    tcr->base_mask = 0xffffc000u;
3390}
3391
3392static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3393                               uint64_t value)
3394{
3395    ARMCPU *cpu = env_archcpu(env);
3396    TCR *tcr = raw_ptr(env, ri);
3397
3398    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
3399    tlb_flush(CPU(cpu));
3400    tcr->raw_tcr = value;
3401}
3402
3403static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3404                            uint64_t value)
3405{
3406    /* If the ASID changes (with a 64-bit write), we must flush the TLB.  */
3407    if (cpreg_field_is_64bit(ri) &&
3408        extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
3409        ARMCPU *cpu = env_archcpu(env);
3410        tlb_flush(CPU(cpu));
3411    }
3412    raw_write(env, ri, value);
3413}
3414
3415static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3416                        uint64_t value)
3417{
3418    ARMCPU *cpu = env_archcpu(env);
3419    CPUState *cs = CPU(cpu);
3420
3421    /* Accesses to VTTBR may change the VMID so we must flush the TLB.  */
3422    if (raw_read(env, ri) != value) {
3423        tlb_flush_by_mmuidx(cs,
3424                            ARMMMUIdxBit_S12NSE1 |
3425                            ARMMMUIdxBit_S12NSE0 |
3426                            ARMMMUIdxBit_S2NS);
3427        raw_write(env, ri, value);
3428    }
3429}
3430
3431static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
3432    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
3433      .access = PL1_RW, .type = ARM_CP_ALIAS,
3434      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
3435                             offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
3436    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
3437      .access = PL1_RW, .resetvalue = 0,
3438      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
3439                             offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
3440    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
3441      .access = PL1_RW, .resetvalue = 0,
3442      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
3443                             offsetof(CPUARMState, cp15.dfar_ns) } },
3444    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
3445      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
3446      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
3447      .resetvalue = 0, },
3448    REGINFO_SENTINEL
3449};
3450
3451static const ARMCPRegInfo vmsa_cp_reginfo[] = {
3452    { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
3453      .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
3454      .access = PL1_RW,
3455      .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
3456    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
3457      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
3458      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3459      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3460                             offsetof(CPUARMState, cp15.ttbr0_ns) } },
3461    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
3462      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
3463      .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
3464      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3465                             offsetof(CPUARMState, cp15.ttbr1_ns) } },
3466    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
3467      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3468      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
3469      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
3470      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
3471    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
3472      .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
3473      .raw_writefn = vmsa_ttbcr_raw_write,
3474      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
3475                             offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
3476    REGINFO_SENTINEL
3477};
3478
3479/* Note that unlike TTBCR, writing to TTBCR2 does not require flushing
3480 * the QEMU TLBs or adjusting the cached masks.
3481 */
3482static const ARMCPRegInfo ttbcr2_reginfo = {
3483    .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3,
3484    .access = PL1_RW, .type = ARM_CP_ALIAS,
3485    .bank_fieldoffsets = { offsetofhigh32(CPUARMState, cp15.tcr_el[3]),
3486                           offsetofhigh32(CPUARMState, cp15.tcr_el[1]) },
3487};
3488
3489static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
3490                                uint64_t value)
3491{
3492    env->cp15.c15_ticonfig = value & 0xe7;
3493    /* The OS_TYPE bit in this register changes the reported CPUID! */
3494    env->cp15.c0_cpuid = (value & (1 << 5)) ?
3495        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
3496}
3497
3498static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
3499                                uint64_t value)
3500{
3501    env->cp15.c15_threadid = value & 0xffff;
3502}
3503
3504static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
3505                           uint64_t value)
3506{
3507    /* Wait-for-interrupt (deprecated) */
3508    cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
3509}
3510
3511static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
3512                                  uint64_t value)
3513{
3514    /* On OMAP there are registers indicating the max/min index of dcache lines
3515     * containing a dirty line; cache flush operations have to reset these.
3516     */
3517    env->cp15.c15_i_max = 0x000;
3518    env->cp15.c15_i_min = 0xff0;
3519}
3520
3521static const ARMCPRegInfo omap_cp_reginfo[] = {
3522    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
3523      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
3524      .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
3525      .resetvalue = 0, },
3526    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
3527      .access = PL1_RW, .type = ARM_CP_NOP },
3528    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
3529      .access = PL1_RW,
3530      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
3531      .writefn = omap_ticonfig_write },
3532    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
3533      .access = PL1_RW,
3534      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
3535    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
3536      .access = PL1_RW, .resetvalue = 0xff0,
3537      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
3538    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
3539      .access = PL1_RW,
3540      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
3541      .writefn = omap_threadid_write },
3542    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
3543      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3544      .type = ARM_CP_NO_RAW,
3545      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
3546    /* TODO: Peripheral port remap register:
3547     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
3548     * base address at $rn & ~0xfff and a map size of 0x200 << ($rn & 0xfff),
3549     * when the MMU is off.
3550     */
3551    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
3552      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
3553      .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
3554      .writefn = omap_cachemaint_write },
3555    { .name = "C9", .cp = 15, .crn = 9,
3556      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
3557      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
3558    REGINFO_SENTINEL
3559};
3560
3561static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
3562                              uint64_t value)
3563{
3564    env->cp15.c15_cpar = value & 0x3fff;
3565}
3566
3567static const ARMCPRegInfo xscale_cp_reginfo[] = {
3568    { .name = "XSCALE_CPAR",
3569      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
3570      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
3571      .writefn = xscale_cpar_write, },
3572    { .name = "XSCALE_AUXCR",
3573      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
3574      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
3575      .resetvalue = 0, },
3576    /* XScale specific cache-lockdown: since we have no cache we NOP these
3577     * and hope the guest does not really rely on cache behaviour.
3578     */
3579    { .name = "XSCALE_LOCK_ICACHE_LINE",
3580      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
3581      .access = PL1_W, .type = ARM_CP_NOP },
3582    { .name = "XSCALE_UNLOCK_ICACHE",
3583      .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
3584      .access = PL1_W, .type = ARM_CP_NOP },
3585    { .name = "XSCALE_DCACHE_LOCK",
3586      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
3587      .access = PL1_RW, .type = ARM_CP_NOP },
3588    { .name = "XSCALE_UNLOCK_DCACHE",
3589      .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
3590      .access = PL1_W, .type = ARM_CP_NOP },
3591    REGINFO_SENTINEL
3592};
3593
3594static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
3595    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
3596     * implementation of this implementation-defined space.
3597     * Ideally this should eventually disappear in favour of actually
3598     * implementing the correct behaviour for all cores.
3599     */
3600    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
3601      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3602      .access = PL1_RW,
3603      .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
3604      .resetvalue = 0 },
3605    REGINFO_SENTINEL
3606};
3607
3608static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
3609    /* Cache status: RAZ because we have no cache so it's always clean */
3610    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
3611      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3612      .resetvalue = 0 },
3613    REGINFO_SENTINEL
3614};
3615
3616static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
3617    /* We never have a block transfer operation in progress */
3618    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
3619      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3620      .resetvalue = 0 },
3621    /* The cache ops themselves: these all NOP for QEMU */
3622    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
3623      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3624    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
3625      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3626    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
3627      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3628    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
3629      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3630    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
3631      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3632    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
3633      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
3634    REGINFO_SENTINEL
3635};
3636
3637static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
3638    /* The cache test-and-clean instructions always return (1 << 30)
3639     * to indicate that there are no dirty cache lines.
3640     */
3641    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
3642      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3643      .resetvalue = (1 << 30) },
3644    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
3645      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
3646      .resetvalue = (1 << 30) },
3647    REGINFO_SENTINEL
3648};
3649
3650static const ARMCPRegInfo strongarm_cp_reginfo[] = {
3651    /* Ignore ReadBuffer accesses */
3652    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
3653      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
3654      .access = PL1_RW, .resetvalue = 0,
3655      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
3656    REGINFO_SENTINEL
3657};
3658
3659static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3660{
3661    ARMCPU *cpu = env_archcpu(env);
3662    unsigned int cur_el = arm_current_el(env);
3663    bool secure = arm_is_secure(env);
3664
3665    if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3666        return env->cp15.vpidr_el2;
3667    }
3668    return raw_read(env, ri);
3669}
3670
3671static uint64_t mpidr_read_val(CPUARMState *env)
3672{
3673    ARMCPU *cpu = env_archcpu(env);
3674    uint64_t mpidr = cpu->mp_affinity;
3675
3676    if (arm_feature(env, ARM_FEATURE_V7MP)) {
3677        mpidr |= (1U << 31);
3678        /* Cores which are uniprocessor (non-coherent)
3679         * but still implement the MP extensions set
3680         * bit 30 (for instance, Cortex-R5).
3681         */
3682        if (cpu->mp_is_up) {
3683            mpidr |= (1u << 30);
3684        }
3685    }
3686    return mpidr;
3687}
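
/*
 * For instance, core 3 of a coherent v7MP cluster (mp_affinity == 3) reads
 * MPIDR as 0x80000003; a uniprocessor Cortex-R5 with mp_is_up set reports
 * bit 30 as well (0xc0000000 | affinity).
 */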
3688
3689static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3690{
3691    unsigned int cur_el = arm_current_el(env);
3692    bool secure = arm_is_secure(env);
3693
3694    if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
3695        return env->cp15.vmpidr_el2;
3696    }
3697    return mpidr_read_val(env);
3698}
3699
3700static const ARMCPRegInfo lpae_cp_reginfo[] = {
3701    /* NOP AMAIR0/1 */
3702    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
3703      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
3704      .access = PL1_RW, .type = ARM_CP_CONST,
3705      .resetvalue = 0 },
3706    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
3707    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
3708      .access = PL1_RW, .type = ARM_CP_CONST,
3709      .resetvalue = 0 },
3710    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
3711      .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
3712      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
3713                             offsetof(CPUARMState, cp15.par_ns)} },
3714    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
3715      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3716      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
3717                             offsetof(CPUARMState, cp15.ttbr0_ns) },
3718      .writefn = vmsa_ttbr_write, },
3719    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
3720      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
3721      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
3722                             offsetof(CPUARMState, cp15.ttbr1_ns) },
3723      .writefn = vmsa_ttbr_write, },
3724    REGINFO_SENTINEL
3725};
3726
3727static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3728{
3729    return vfp_get_fpcr(env);
3730}
3731
3732static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3733                            uint64_t value)
3734{
3735    vfp_set_fpcr(env, value);
3736}
3737
3738static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
3739{
3740    return vfp_get_fpsr(env);
3741}
3742
3743static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
3744                            uint64_t value)
3745{
3746    vfp_set_fpsr(env, value);
3747}
3748
3749static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
3750                                       bool isread)
3751{
3752    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
3753        return CP_ACCESS_TRAP;
3754    }
3755    return CP_ACCESS_OK;
3756}
3757
3758static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
3759                            uint64_t value)
3760{
3761    env->daif = value & PSTATE_DAIF;
3762}
3763
3764static CPAccessResult aa64_cacheop_access(CPUARMState *env,
3765                                          const ARMCPRegInfo *ri,
3766                                          bool isread)
3767{
3768    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
3769     * SCTLR_EL1.UCI is set.
3770     */
3771    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
3772        return CP_ACCESS_TRAP;
3773    }
3774    return CP_ACCESS_OK;
3775}
3776
3777/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
3778 * Page D4-1736 (DDI0487A.b)
3779 */
3780
3781static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3782                                      uint64_t value)
3783{
3784    CPUState *cs = env_cpu(env);
3785    bool sec = arm_is_secure_below_el3(env);
3786
3787    if (sec) {
3788        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3789                                            ARMMMUIdxBit_S1SE1 |
3790                                            ARMMMUIdxBit_S1SE0);
3791    } else {
3792        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3793                                            ARMMMUIdxBit_S12NSE1 |
3794                                            ARMMMUIdxBit_S12NSE0);
3795    }
3796}
3797
3798static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3799                                    uint64_t value)
3800{
3801    CPUState *cs = env_cpu(env);
3802
3803    if (tlb_force_broadcast(env)) {
3804        tlbi_aa64_vmalle1is_write(env, NULL, value);
3805        return;
3806    }
3807
3808    if (arm_is_secure_below_el3(env)) {
3809        tlb_flush_by_mmuidx(cs,
3810                            ARMMMUIdxBit_S1SE1 |
3811                            ARMMMUIdxBit_S1SE0);
3812    } else {
3813        tlb_flush_by_mmuidx(cs,
3814                            ARMMMUIdxBit_S12NSE1 |
3815                            ARMMMUIdxBit_S12NSE0);
3816    }
3817}
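
/*
 * tlbi_aa64_vae1_write() below follows the same pattern: when
 * tlb_force_broadcast() says local TLB maintenance must behave as broadcast
 * (e.g. because of HCR_EL2.FB), we simply reuse the corresponding Inner
 * Shareable handler.
 */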
3818
3819static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3820                                  uint64_t value)
3821{
3822    /* Note that the 'ALL' scope must invalidate both stage 1 and
3823     * stage 2 translations, whereas most other scopes only invalidate
3824     * stage 1 translations.
3825     */
3826    ARMCPU *cpu = env_archcpu(env);
3827    CPUState *cs = CPU(cpu);
3828
3829    if (arm_is_secure_below_el3(env)) {
3830        tlb_flush_by_mmuidx(cs,
3831                            ARMMMUIdxBit_S1SE1 |
3832                            ARMMMUIdxBit_S1SE0);
3833    } else {
3834        if (arm_feature(env, ARM_FEATURE_EL2)) {
3835            tlb_flush_by_mmuidx(cs,
3836                                ARMMMUIdxBit_S12NSE1 |
3837                                ARMMMUIdxBit_S12NSE0 |
3838                                ARMMMUIdxBit_S2NS);
3839        } else {
3840            tlb_flush_by_mmuidx(cs,
3841                                ARMMMUIdxBit_S12NSE1 |
3842                                ARMMMUIdxBit_S12NSE0);
3843        }
3844    }
3845}
3846
3847static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3848                                  uint64_t value)
3849{
3850    ARMCPU *cpu = env_archcpu(env);
3851    CPUState *cs = CPU(cpu);
3852
3853    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2);
3854}
3855
3856static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3857                                  uint64_t value)
3858{
3859    ARMCPU *cpu = env_archcpu(env);
3860    CPUState *cs = CPU(cpu);
3861
3862    tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3);
3863}
3864
3865static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3866                                    uint64_t value)
3867{
3868    /* Note that the 'ALL' scope must invalidate both stage 1 and
3869     * stage 2 translations, whereas most other scopes only invalidate
3870     * stage 1 translations.
3871     */
3872    CPUState *cs = env_cpu(env);
3873    bool sec = arm_is_secure_below_el3(env);
3874    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
3875
3876    if (sec) {
3877        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3878                                            ARMMMUIdxBit_S1SE1 |
3879                                            ARMMMUIdxBit_S1SE0);
3880    } else if (has_el2) {
3881        tlb_flush_by_mmuidx_all_cpus_synced(cs,
3882                                            ARMMMUIdxBit_S12NSE1 |
3883                                            ARMMMUIdxBit_S12NSE0 |
3884                                            ARMMMUIdxBit_S2NS);
3885    } else {
3886          tlb_flush_by_mmuidx_all_cpus_synced(cs,
3887                                              ARMMMUIdxBit_S12NSE1 |
3888                                              ARMMMUIdxBit_S12NSE0);
3889    }
3890}
3891
3892static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3893                                    uint64_t value)
3894{
3895    CPUState *cs = env_cpu(env);
3896
3897    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2);
3898}
3899
3900static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3901                                    uint64_t value)
3902{
3903    CPUState *cs = env_cpu(env);
3904
3905    tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3);
3906}
3907
3908static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
3909                                 uint64_t value)
3910{
3911    /* Invalidate by VA, EL2
3912     * Currently handles both VAE2 and VALE2, since we don't support
3913     * flush-last-level-only.
3914     */
3915    ARMCPU *cpu = env_archcpu(env);
3916    CPUState *cs = CPU(cpu);
3917    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3918
3919    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2);
3920}
3921
3922static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
3923                                 uint64_t value)
3924{
3925    /* Invalidate by VA, EL3
3926     * Currently handles both VAE3 and VALE3, since we don't support
3927     * flush-last-level-only.
3928     */
3929    ARMCPU *cpu = env_archcpu(env);
3930    CPUState *cs = CPU(cpu);
3931    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3932
3933    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E3);
3934}
3935
3936static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3937                                   uint64_t value)
3938{
3939    ARMCPU *cpu = env_archcpu(env);
3940    CPUState *cs = CPU(cpu);
3941    bool sec = arm_is_secure_below_el3(env);
3942    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3943
3944    if (sec) {
3945        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3946                                                 ARMMMUIdxBit_S1SE1 |
3947                                                 ARMMMUIdxBit_S1SE0);
3948    } else {
3949        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3950                                                 ARMMMUIdxBit_S12NSE1 |
3951                                                 ARMMMUIdxBit_S12NSE0);
3952    }
3953}
3954
3955static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
3956                                 uint64_t value)
3957{
3958    /* Invalidate by VA, EL1&0 (AArch64 version).
3959     * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
3960     * since we don't support flush-for-specific-ASID-only or
3961     * flush-last-level-only.
3962     */
3963    ARMCPU *cpu = env_archcpu(env);
3964    CPUState *cs = CPU(cpu);
3965    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3966
3967    if (tlb_force_broadcast(env)) {
3968        tlbi_aa64_vae1is_write(env, NULL, value);
3969        return;
3970    }
3971
3972    if (arm_is_secure_below_el3(env)) {
3973        tlb_flush_page_by_mmuidx(cs, pageaddr,
3974                                 ARMMMUIdxBit_S1SE1 |
3975                                 ARMMMUIdxBit_S1SE0);
3976    } else {
3977        tlb_flush_page_by_mmuidx(cs, pageaddr,
3978                                 ARMMMUIdxBit_S12NSE1 |
3979                                 ARMMMUIdxBit_S12NSE0);
3980    }
3981}
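
/*
 * In the VA-based invalidates above, Xt carries VA[55:12] in its low bits,
 * so "value << 12" sign-extended to 56 bits recovers the page address:
 * e.g. an Xt value of 0x400 names the 4KB page at VA 0x400000.
 */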
3982
3983static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3984                                   uint64_t value)
3985{
3986    CPUState *cs = env_cpu(env);
3987    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3988
3989    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
3990                                             ARMMMUIdxBit_S1E2);
3991}
3992
3993static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
3994                                   uint64_t value)
3995{
3996    CPUState *cs = env_cpu(env);
3997    uint64_t pageaddr = sextract64(value << 12, 0, 56);
3998
3999    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
4000                                             ARMMMUIdxBit_S1E3);
4001}
4002
4003static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
4004                                    uint64_t value)
4005{
4006    /* Invalidate by IPA. This has to invalidate any structures that
4007     * contain only stage 2 translation information, but does not need
4008     * to apply to structures that contain combined stage 1 and stage 2
4009     * translation information.
4010     * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
4011     */
4012    ARMCPU *cpu = env_archcpu(env);
4013    CPUState *cs = CPU(cpu);
4014    uint64_t pageaddr;
4015
4016    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
4017        return;
4018    }
4019
4020    pageaddr = sextract64(value << 12, 0, 48);
4021
4022    tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S2NS);
4023}
4024
4025static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
4026                                      uint64_t value)
4027{
4028    CPUState *cs = env_cpu(env);
4029    uint64_t pageaddr;
4030
4031    if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
4032        return;
4033    }
4034
4035    pageaddr = sextract64(value << 12, 0, 48);
4036
4037    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
4038                                             ARMMMUIdxBit_S2NS);
4039}
4040
4041static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
4042                                      bool isread)
4043{
4044    /* We don't implement EL2, so the only control on DC ZVA is the
4045     * bit in the SCTLR which can prohibit access for EL0.
4046     */
4047    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
4048        return CP_ACCESS_TRAP;
4049    }
4050    return CP_ACCESS_OK;
4051}
4052
4053static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
4054{
4055    ARMCPU *cpu = env_archcpu(env);
4056    int dzp_bit = 1 << 4;
4057
4058    /* DZP indicates whether DC ZVA access is allowed */
4059    if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
4060        dzp_bit = 0;
4061    }
4062    return cpu->dcz_blocksize | dzp_bit;
4063}
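
/*
 * For example, with dcz_blocksize == 4 (log2 of the block size in words,
 * i.e. 64-byte DC ZVA blocks) this returns 0x04 when EL0 may use DC ZVA
 * and 0x14 (DZP set) when the SCTLR check above forbids it.
 */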
4064
4065static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
4066                                    bool isread)
4067{
4068    if (!(env->pstate & PSTATE_SP)) {
4069        /* Access to SP_EL0 is undefined if it's being used as
4070         * the stack pointer.
4071         */
4072        return CP_ACCESS_TRAP_UNCATEGORIZED;
4073    }
4074    return CP_ACCESS_OK;
4075}
4076
4077static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
4078{
4079    return env->pstate & PSTATE_SP;
4080}
4081
4082static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
4083{
4084    update_spsel(env, val);
4085}
4086
4087static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4088                        uint64_t value)
4089{
4090    ARMCPU *cpu = env_archcpu(env);
4091
4092    if (raw_read(env, ri) == value) {
4093        /* Skip the TLB flush if nothing actually changed; Linux likes
4094         * to do a lot of pointless SCTLR writes.
4095         */
4096        return;
4097    }
4098
4099    if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) {
4100        /* M bit is RAZ/WI for PMSA with no MPU implemented */
4101        value &= ~SCTLR_M;
4102    }
4103
4104    raw_write(env, ri, value);
4105    /* ??? Lots of these bits are not implemented.  */
4106    /* This may enable/disable the MMU, so do a TLB flush.  */
4107    tlb_flush(CPU(cpu));
4108}
4109
4110static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
4111                                     bool isread)
4112{
4113    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
4114        return CP_ACCESS_TRAP_FP_EL2;
4115    }
4116    if (env->cp15.cptr_el[3] & CPTR_TFP) {
4117        return CP_ACCESS_TRAP_FP_EL3;
4118    }
4119    return CP_ACCESS_OK;
4120}
4121
4122static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
4123                       uint64_t value)
4124{
4125    env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
4126}
4127
4128static const ARMCPRegInfo v8_cp_reginfo[] = {
4129    /* Minimal set of EL0-visible registers. This will need to be expanded
4130     * significantly for system emulation of AArch64 CPUs.
4131     */
4132    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
4133      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
4134      .access = PL0_RW, .type = ARM_CP_NZCV },
4135    { .name = "DAIF", .state = ARM_CP_STATE_AA64,
4136      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
4137      .type = ARM_CP_NO_RAW,
4138      .access = PL0_RW, .accessfn = aa64_daif_access,
4139      .fieldoffset = offsetof(CPUARMState, daif),
4140      .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
4141    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
4142      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
4143      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4144      .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
4145    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
4146      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
4147      .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END,
4148      .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
4149    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
4150      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
4151      .access = PL0_R, .type = ARM_CP_NO_RAW,
4152      .readfn = aa64_dczid_read },
4153    { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
4154      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
4155      .access = PL0_W, .type = ARM_CP_DC_ZVA,
4156#ifndef CONFIG_USER_ONLY
4157      /* Avoid overhead of an access check that always passes in user-mode */
4158      .accessfn = aa64_zva_access,
4159#endif
4160    },
4161    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
4162      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
4163      .access = PL1_R, .type = ARM_CP_CURRENTEL },
4164    /* Cache ops: all NOPs since we don't emulate caches */
4165    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
4166      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4167      .access = PL1_W, .type = ARM_CP_NOP },
4168    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
4169      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4170      .access = PL1_W, .type = ARM_CP_NOP },
4171    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
4172      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
4173      .access = PL0_W, .type = ARM_CP_NOP,
4174      .accessfn = aa64_cacheop_access },
4175    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
4176      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4177      .access = PL1_W, .type = ARM_CP_NOP },
4178    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
4179      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4180      .access = PL1_W, .type = ARM_CP_NOP },
4181    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
4182      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
4183      .access = PL0_W, .type = ARM_CP_NOP,
4184      .accessfn = aa64_cacheop_access },
4185    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
4186      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4187      .access = PL1_W, .type = ARM_CP_NOP },
4188    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
4189      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
4190      .access = PL0_W, .type = ARM_CP_NOP,
4191      .accessfn = aa64_cacheop_access },
4192    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
4193      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
4194      .access = PL0_W, .type = ARM_CP_NOP,
4195      .accessfn = aa64_cacheop_access },
4196    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
4197      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4198      .access = PL1_W, .type = ARM_CP_NOP },
4199    /* TLBI operations */
4200    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
4201      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
4202      .access = PL1_W, .type = ARM_CP_NO_RAW,
4203      .writefn = tlbi_aa64_vmalle1is_write },
4204    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
4205      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
4206      .access = PL1_W, .type = ARM_CP_NO_RAW,
4207      .writefn = tlbi_aa64_vae1is_write },
4208    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
4209      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
4210      .access = PL1_W, .type = ARM_CP_NO_RAW,
4211      .writefn = tlbi_aa64_vmalle1is_write },
4212    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
4213      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
4214      .access = PL1_W, .type = ARM_CP_NO_RAW,
4215      .writefn = tlbi_aa64_vae1is_write },
4216    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
4217      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4218      .access = PL1_W, .type = ARM_CP_NO_RAW,
4219      .writefn = tlbi_aa64_vae1is_write },
4220    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
4221      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4222      .access = PL1_W, .type = ARM_CP_NO_RAW,
4223      .writefn = tlbi_aa64_vae1is_write },
4224    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
4225      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
4226      .access = PL1_W, .type = ARM_CP_NO_RAW,
4227      .writefn = tlbi_aa64_vmalle1_write },
4228    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
4229      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
4230      .access = PL1_W, .type = ARM_CP_NO_RAW,
4231      .writefn = tlbi_aa64_vae1_write },
4232    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
4233      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
4234      .access = PL1_W, .type = ARM_CP_NO_RAW,
4235      .writefn = tlbi_aa64_vmalle1_write },
4236    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
4237      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
4238      .access = PL1_W, .type = ARM_CP_NO_RAW,
4239      .writefn = tlbi_aa64_vae1_write },
4240    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
4241      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4242      .access = PL1_W, .type = ARM_CP_NO_RAW,
4243      .writefn = tlbi_aa64_vae1_write },
4244    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
4245      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4246      .access = PL1_W, .type = ARM_CP_NO_RAW,
4247      .writefn = tlbi_aa64_vae1_write },
4248    { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
4249      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4250      .access = PL2_W, .type = ARM_CP_NO_RAW,
4251      .writefn = tlbi_aa64_ipas2e1is_write },
4252    { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
4253      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4254      .access = PL2_W, .type = ARM_CP_NO_RAW,
4255      .writefn = tlbi_aa64_ipas2e1is_write },
4256    { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
4257      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4258      .access = PL2_W, .type = ARM_CP_NO_RAW,
4259      .writefn = tlbi_aa64_alle1is_write },
4260    { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
4261      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
4262      .access = PL2_W, .type = ARM_CP_NO_RAW,
4263      .writefn = tlbi_aa64_alle1is_write },
4264    { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
4265      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4266      .access = PL2_W, .type = ARM_CP_NO_RAW,
4267      .writefn = tlbi_aa64_ipas2e1_write },
4268    { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
4269      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4270      .access = PL2_W, .type = ARM_CP_NO_RAW,
4271      .writefn = tlbi_aa64_ipas2e1_write },
4272    { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
4273      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4274      .access = PL2_W, .type = ARM_CP_NO_RAW,
4275      .writefn = tlbi_aa64_alle1_write },
4276    { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
4277      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
4278      .access = PL2_W, .type = ARM_CP_NO_RAW,
4279      .writefn = tlbi_aa64_alle1is_write },
4280#ifndef CONFIG_USER_ONLY
4281    /* 64 bit address translation operations */
4282    { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
4283      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
4284      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4285    { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
4286      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
4287      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4288    { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
4289      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
4290      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4291    { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
4292      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
4293      .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4294    { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
4295      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
4296      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4297    { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
4298      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
4299      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4300    { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
4301      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
4302      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4303    { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
4304      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
4305      .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4306    /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
4307    { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
4308      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
4309      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4310    { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
4311      .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
4312      .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4313    { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
4314      .type = ARM_CP_ALIAS,
4315      .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
4316      .access = PL1_RW, .resetvalue = 0,
4317      .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
4318      .writefn = par_write },
4319#endif
4320    /* TLB invalidate last level of translation table walk */
4321    { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
4322      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
4323    { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
4324      .type = ARM_CP_NO_RAW, .access = PL1_W,
4325      .writefn = tlbimvaa_is_write },
4326    { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
4327      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
4328    { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
4329      .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
4330    { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4331      .type = ARM_CP_NO_RAW, .access = PL2_W,
4332      .writefn = tlbimva_hyp_write },
4333    { .name = "TLBIMVALHIS",
4334      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
4335      .type = ARM_CP_NO_RAW, .access = PL2_W,
4336      .writefn = tlbimva_hyp_is_write },
4337    { .name = "TLBIIPAS2",
4338      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
4339      .type = ARM_CP_NO_RAW, .access = PL2_W,
4340      .writefn = tlbiipas2_write },
4341    { .name = "TLBIIPAS2IS",
4342      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
4343      .type = ARM_CP_NO_RAW, .access = PL2_W,
4344      .writefn = tlbiipas2_is_write },
4345    { .name = "TLBIIPAS2L",
4346      .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
4347      .type = ARM_CP_NO_RAW, .access = PL2_W,
4348      .writefn = tlbiipas2_write },
4349    { .name = "TLBIIPAS2LIS",
4350      .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
4351      .type = ARM_CP_NO_RAW, .access = PL2_W,
4352      .writefn = tlbiipas2_is_write },
4353    /* 32 bit cache operations */
4354    { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
4355      .type = ARM_CP_NOP, .access = PL1_W },
4356    { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
4357      .type = ARM_CP_NOP, .access = PL1_W },
4358    { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
4359      .type = ARM_CP_NOP, .access = PL1_W },
4360    { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
4361      .type = ARM_CP_NOP, .access = PL1_W },
4362    { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
4363      .type = ARM_CP_NOP, .access = PL1_W },
4364    { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
4365      .type = ARM_CP_NOP, .access = PL1_W },
4366    { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
4367      .type = ARM_CP_NOP, .access = PL1_W },
4368    { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
4369      .type = ARM_CP_NOP, .access = PL1_W },
4370    { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
4371      .type = ARM_CP_NOP, .access = PL1_W },
4372    { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
4373      .type = ARM_CP_NOP, .access = PL1_W },
4374    { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
4375      .type = ARM_CP_NOP, .access = PL1_W },
4376    { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
4377      .type = ARM_CP_NOP, .access = PL1_W },
4378    { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
4379      .type = ARM_CP_NOP, .access = PL1_W },
4380    /* MMU Domain access control / MPU write buffer control */
4381    { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
4382      .access = PL1_RW, .resetvalue = 0,
4383      .writefn = dacr_write, .raw_writefn = raw_write,
4384      .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
4385                             offsetoflow32(CPUARMState, cp15.dacr_ns) } },
4386    { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
4387      .type = ARM_CP_ALIAS,
4388      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
4389      .access = PL1_RW,
4390      .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
4391    { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
4392      .type = ARM_CP_ALIAS,
4393      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
4394      .access = PL1_RW,
4395      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
4396    /* We rely on the access checks not allowing the guest to write to the
4397     * state field when SPSel indicates that it's being used as the stack
4398     * pointer.
4399     */
4400    { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
4401      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
4402      .access = PL1_RW, .accessfn = sp_el0_access,
4403      .type = ARM_CP_ALIAS,
4404      .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
4405    { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
4406      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
4407      .access = PL2_RW, .type = ARM_CP_ALIAS,
4408      .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
4409    { .name = "SPSel", .state = ARM_CP_STATE_AA64,
4410      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
4411      .type = ARM_CP_NO_RAW,
4412      .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
4413    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
4414      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
4415      .type = ARM_CP_ALIAS,
4416      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
4417      .access = PL2_RW, .accessfn = fpexc32_access },
4418    { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
4419      .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
4420      .access = PL2_RW, .resetvalue = 0,
4421      .writefn = dacr_write, .raw_writefn = raw_write,
4422      .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
4423    { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
4424      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
4425      .access = PL2_RW, .resetvalue = 0,
4426      .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
4427    { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
4428      .type = ARM_CP_ALIAS,
4429      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
4430      .access = PL2_RW,
4431      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
4432    { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
4433      .type = ARM_CP_ALIAS,
4434      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
4435      .access = PL2_RW,
4436      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
4437    { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
4438      .type = ARM_CP_ALIAS,
4439      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
4440      .access = PL2_RW,
4441      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
4442    { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
4443      .type = ARM_CP_ALIAS,
4444      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
4445      .access = PL2_RW,
4446      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
4447    { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
4448      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
4449      .resetvalue = 0,
4450      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
4451    { .name = "SDCR", .type = ARM_CP_ALIAS,
4452      .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
4453      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
4454      .writefn = sdcr_write,
4455      .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
4456    REGINFO_SENTINEL
4457};
4458
4459/* Used to describe the behaviour of EL2 regs when EL2 does not exist.  */
4460static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
4461    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4462      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4463      .access = PL2_RW,
4464      .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
4465    { .name = "HCR_EL2", .state = ARM_CP_STATE_BOTH,
4466      .type = ARM_CP_NO_RAW,
4467      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4468      .access = PL2_RW,
4469      .type = ARM_CP_CONST, .resetvalue = 0 },
4470    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4471      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4472      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4473    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4474      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4475      .access = PL2_RW,
4476      .type = ARM_CP_CONST, .resetvalue = 0 },
4477    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4478      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4479      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4480    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4481      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4482      .access = PL2_RW, .type = ARM_CP_CONST,
4483      .resetvalue = 0 },
4484    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4485      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4486      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4487    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4488      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4489      .access = PL2_RW, .type = ARM_CP_CONST,
4490      .resetvalue = 0 },
4491    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4492      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4493      .access = PL2_RW, .type = ARM_CP_CONST,
4494      .resetvalue = 0 },
4495    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4496      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4497      .access = PL2_RW, .type = ARM_CP_CONST,
4498      .resetvalue = 0 },
4499    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4500      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4501      .access = PL2_RW, .type = ARM_CP_CONST,
4502      .resetvalue = 0 },
4503    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4504      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4505      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4506    { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
4507      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4508      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4509      .type = ARM_CP_CONST, .resetvalue = 0 },
4510    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4511      .cp = 15, .opc1 = 6, .crm = 2,
4512      .access = PL2_RW, .accessfn = access_el3_aa32ns,
4513      .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
4514    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4515      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4516      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4517    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4518      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4519      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4520    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4521      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4522      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4523    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4524      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4525      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4526    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4527      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4528      .resetvalue = 0 },
4529    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4530      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4531      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4532    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4533      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4534      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4535    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4536      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4537      .resetvalue = 0 },
4538    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4539      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4540      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4541    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4542      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
4543      .resetvalue = 0 },
4544    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4545      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4546      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4547    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4548      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4549      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4550    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4551      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4552      .access = PL2_RW, .accessfn = access_tda,
4553      .type = ARM_CP_CONST, .resetvalue = 0 },
4554    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
4555      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4556      .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
4557      .type = ARM_CP_CONST, .resetvalue = 0 },
4558    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4559      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4560      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4561    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4562      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4563      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4564    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4565      .type = ARM_CP_CONST,
4566      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4567      .access = PL2_RW, .resetvalue = 0 },
4568    REGINFO_SENTINEL
4569};
4570
4571/* Ditto, but for registers which exist in ARMv8 but not v7 */
4572static const ARMCPRegInfo el3_no_el2_v8_cp_reginfo[] = {
4573    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
4574      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
4575      .access = PL2_RW,
4576      .type = ARM_CP_CONST, .resetvalue = 0 },
4577    REGINFO_SENTINEL
4578};
4579
4580static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
4581{
4582    ARMCPU *cpu = env_archcpu(env);
4583    uint64_t valid_mask = HCR_MASK;
4584
4585    if (arm_feature(env, ARM_FEATURE_EL3)) {
4586        valid_mask &= ~HCR_HCD;
4587    } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
4588        /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
4589         * However, if we're using the SMC PSCI conduit then QEMU is
4590         * effectively acting like EL3 firmware and so the guest at
4591         * EL2 should retain the ability to prevent EL1 from being
4592         * able to make SMC calls into the ersatz firmware, so in
4593         * that case HCR.TSC should be read/write.
4594         */
4595        valid_mask &= ~HCR_TSC;
4596    }
4597    if (cpu_isar_feature(aa64_lor, cpu)) {
4598        valid_mask |= HCR_TLOR;
4599    }
4600    if (cpu_isar_feature(aa64_pauth, cpu)) {
4601        valid_mask |= HCR_API | HCR_APK;
4602    }
4603
4604    /* Clear RES0 bits.  */
4605    value &= valid_mask;
4606
4607    /* These bits change the MMU setup:
4608     * HCR_VM enables stage 2 translation
4609     * HCR_PTW forbids certain page-table setups
4610     * HCR_DC Disables stage1 and enables stage2 translation
4611     */
4612    if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
4613        tlb_flush(CPU(cpu));
4614    }
4615    env->cp15.hcr_el2 = value;
4616
4617    /*
4618     * Updates to VI and VF require us to update the status of
4619     * virtual interrupts, which are the logical OR of these bits
4620     * and the state of the input lines from the GIC. (This requires
4621     * that we have the iothread lock, which is done by marking the
4622     * reginfo structs as ARM_CP_IO.)
4623     * Note that if a write to HCR pends a VIRQ or VFIQ it is never
4624     * possible for it to be taken immediately, because VIRQ and
4625     * VFIQ are masked unless running at EL0 or EL1, and HCR
4626     * can only be written at EL2.
4627     */
4628    g_assert(qemu_mutex_iothread_locked());
4629    arm_cpu_update_virq(cpu);
4630    arm_cpu_update_vfiq(cpu);
4631}
4632
4633static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri,
4634                          uint64_t value)
4635{
4636    /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */
4637    value = deposit64(env->cp15.hcr_el2, 32, 32, value);
4638    hcr_write(env, NULL, value);
4639}
4640
4641static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
4642                         uint64_t value)
4643{
4644    /* Handle HCR write, i.e. write to low half of HCR_EL2 */
4645    value = deposit64(env->cp15.hcr_el2, 0, 32, value);
4646    hcr_write(env, NULL, value);
4647}
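/*
 * Worked example for hcr_writehigh()/hcr_writelow() above (illustrative
 * values): if the stored hcr_el2 is 0x0000000080000000 and AArch32 code
 * writes 0x1 to HCR2, hcr_writehigh computes
 * deposit64(0x0000000080000000, 32, 32, 0x1) == 0x0000000180000000, so the
 * low half written via HCR is preserved and only bits [63:32] change; the
 * merged value then goes through hcr_write for the usual RES0 masking and
 * TLB-flush checks.
 */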
4648
4649/*
4650 * Return the effective value of HCR_EL2.
4651 * Bits that are not included here:
4652 * RW       (read from SCR_EL3.RW as needed)
4653 */
4654uint64_t arm_hcr_el2_eff(CPUARMState *env)
4655{
4656    uint64_t ret = env->cp15.hcr_el2;
4657
4658    if (arm_is_secure_below_el3(env)) {
4659        /*
4660         * "This register has no effect if EL2 is not enabled in the
4661         * current Security state".  This is ARMv8.4-SecEL2 speak for
4662         * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
4663         *
4664         * Prior to that, the language was "In an implementation that
4665         * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
4666         * as if this field is 0 for all purposes other than a direct
4667         * read or write access of HCR_EL2".  With lots of enumeration
4668         * on a per-field basis.  In current QEMU, this condition
4669         * is arm_is_secure_below_el3.
4670         *
4671         * Since the v8.4 language applies to the entire register, and
4672         * appears to be backward compatible, use that.
4673         */
4674        ret = 0;
4675    } else if (ret & HCR_TGE) {
4676        /* These bits are up-to-date as of ARMv8.4.  */
4677        if (ret & HCR_E2H) {
4678            ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
4679                     HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
4680                     HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
4681                     HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
4682        } else {
4683            ret |= HCR_FMO | HCR_IMO | HCR_AMO;
4684        }
4685        ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
4686                 HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
4687                 HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
4688                 HCR_TLOR);
4689    }
4690
4691    return ret;
4692}
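/*
 * Worked example for arm_hcr_el2_eff() above: with a non-secure guest that
 * has set HCR_TGE but not HCR_E2H, the effective value has FMO, IMO and AMO
 * forced to 1 and bits such as VI, VF, TSC, TVM and TLOR forced to 0,
 * whatever value was actually written to HCR_EL2.  In Secure state below
 * EL3 the whole register is treated as 0 here.
 */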
4693
4694static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
4695                           uint64_t value)
4696{
4697    /*
4698     * For A-profile AArch32 EL3, if NSACR.CP10
4699     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
4700     */
4701    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
4702        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
4703        value &= ~(0x3 << 10);
4704        value |= env->cp15.cptr_el[2] & (0x3 << 10);
4705    }
4706    env->cp15.cptr_el[2] = value;
4707}
4708
4709static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
4710{
4711    /*
4712     * For A-profile AArch32 EL3, if NSACR.CP10
4713     * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1.
4714     */
4715    uint64_t value = env->cp15.cptr_el[2];
4716
4717    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
4718        !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
4719        value |= 0x3 << 10;
4720    }
4721    return value;
4722}
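/*
 * Example for cptr_el2_write()/cptr_el2_read() above: on a CPU with AArch32
 * EL3, in Non-secure state with NSACR.CP10 == 0, a guest write to HCPTR
 * leaves bits [11:10] at their previously stored value, while a read
 * reports those bits as 1; in every other case the field behaves as a
 * plain read/write value.
 */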
4723
4724static const ARMCPRegInfo el2_cp_reginfo[] = {
4725    { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
4726      .type = ARM_CP_IO,
4727      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4728      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4729      .writefn = hcr_write },
4730    { .name = "HCR", .state = ARM_CP_STATE_AA32,
4731      .type = ARM_CP_ALIAS | ARM_CP_IO,
4732      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
4733      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
4734      .writefn = hcr_writelow },
4735    { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH,
4736      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7,
4737      .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
4738    { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
4739      .type = ARM_CP_ALIAS,
4740      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
4741      .access = PL2_RW,
4742      .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
4743    { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
4744      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
4745      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
4746    { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH,
4747      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
4748      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
4749    { .name = "HIFAR", .state = ARM_CP_STATE_AA32,
4750      .type = ARM_CP_ALIAS,
4751      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2,
4752      .access = PL2_RW,
4753      .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) },
4754    { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
4755      .type = ARM_CP_ALIAS,
4756      .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
4757      .access = PL2_RW,
4758      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
4759    { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
4760      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
4761      .access = PL2_RW, .writefn = vbar_write,
4762      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
4763      .resetvalue = 0 },
4764    { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
4765      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
4766      .access = PL3_RW, .type = ARM_CP_ALIAS,
4767      .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
4768    { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
4769      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
4770      .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
4771      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]),
4772      .readfn = cptr_el2_read, .writefn = cptr_el2_write },
4773    { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
4774      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
4775      .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
4776      .resetvalue = 0 },
4777    { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
4778      .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
4779      .access = PL2_RW, .type = ARM_CP_ALIAS,
4780      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
4781    { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
4782      .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
4783      .access = PL2_RW, .type = ARM_CP_CONST,
4784      .resetvalue = 0 },
4785    /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
4786    { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32,
4787      .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
4788      .access = PL2_RW, .type = ARM_CP_CONST,
4789      .resetvalue = 0 },
4790    { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
4791      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
4792      .access = PL2_RW, .type = ARM_CP_CONST,
4793      .resetvalue = 0 },
4794    { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
4795      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
4796      .access = PL2_RW, .type = ARM_CP_CONST,
4797      .resetvalue = 0 },
4798    { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
4799      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
4800      .access = PL2_RW,
4801      /* no .writefn needed as this can't cause an ASID change;
4802       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
4803       */
4804      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
4805    { .name = "VTCR", .state = ARM_CP_STATE_AA32,
4806      .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4807      .type = ARM_CP_ALIAS,
4808      .access = PL2_RW, .accessfn = access_el3_aa32ns,
4809      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4810    { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
4811      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
4812      .access = PL2_RW,
4813      /* no .writefn needed as this can't cause an ASID change;
4814       * no .raw_writefn or .resetfn needed as we never use mask/base_mask
4815       */
4816      .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
4817    { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
4818      .cp = 15, .opc1 = 6, .crm = 2,
4819      .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4820      .access = PL2_RW, .accessfn = access_el3_aa32ns,
4821      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
4822      .writefn = vttbr_write },
4823    { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
4824      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
4825      .access = PL2_RW, .writefn = vttbr_write,
4826      .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
4827    { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
4828      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
4829      .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
4830      .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
4831    { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
4832      .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
4833      .access = PL2_RW, .resetvalue = 0,
4834      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
4835    { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
4836      .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
4837      .access = PL2_RW, .resetvalue = 0,
4838      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4839    { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
4840      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
4841      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
4842    { .name = "TLBIALLNSNH",
4843      .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
4844      .type = ARM_CP_NO_RAW, .access = PL2_W,
4845      .writefn = tlbiall_nsnh_write },
4846    { .name = "TLBIALLNSNHIS",
4847      .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
4848      .type = ARM_CP_NO_RAW, .access = PL2_W,
4849      .writefn = tlbiall_nsnh_is_write },
4850    { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4851      .type = ARM_CP_NO_RAW, .access = PL2_W,
4852      .writefn = tlbiall_hyp_write },
4853    { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4854      .type = ARM_CP_NO_RAW, .access = PL2_W,
4855      .writefn = tlbiall_hyp_is_write },
4856    { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4857      .type = ARM_CP_NO_RAW, .access = PL2_W,
4858      .writefn = tlbimva_hyp_write },
4859    { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4860      .type = ARM_CP_NO_RAW, .access = PL2_W,
4861      .writefn = tlbimva_hyp_is_write },
4862    { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
4863      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
4864      .type = ARM_CP_NO_RAW, .access = PL2_W,
4865      .writefn = tlbi_aa64_alle2_write },
4866    { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
4867      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
4868      .type = ARM_CP_NO_RAW, .access = PL2_W,
4869      .writefn = tlbi_aa64_vae2_write },
4870    { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
4871      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
4872      .access = PL2_W, .type = ARM_CP_NO_RAW,
4873      .writefn = tlbi_aa64_vae2_write },
4874    { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
4875      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
4876      .access = PL2_W, .type = ARM_CP_NO_RAW,
4877      .writefn = tlbi_aa64_alle2is_write },
4878    { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
4879      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
4880      .type = ARM_CP_NO_RAW, .access = PL2_W,
4881      .writefn = tlbi_aa64_vae2is_write },
4882    { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
4883      .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
4884      .access = PL2_W, .type = ARM_CP_NO_RAW,
4885      .writefn = tlbi_aa64_vae2is_write },
4886#ifndef CONFIG_USER_ONLY
4887    /* Unlike the other EL2-related AT operations, these must
4888     * UNDEF from EL3 if EL2 is not implemented, which is why we
4889     * define them here rather than with the rest of the AT ops.
4890     */
4891    { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
4892      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4893      .access = PL2_W, .accessfn = at_s1e2_access,
4894      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4895    { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
4896      .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4897      .access = PL2_W, .accessfn = at_s1e2_access,
4898      .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
4899    /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
4900     * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
4901     * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
4902     * to behave as if SCR.NS was 1.
4903     */
4904    { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
4905      .access = PL2_W,
4906      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4907    { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
4908      .access = PL2_W,
4909      .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
4910    { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
4911      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
4912      /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
4913       * reset values as IMPDEF. We choose to reset to 3 to comply with
4914       * both ARMv7 and ARMv8.
4915       */
4916      .access = PL2_RW, .resetvalue = 3,
4917      .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
4918    { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
4919      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
4920      .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
4921      .writefn = gt_cntvoff_write,
4922      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4923    { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
4924      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
4925      .writefn = gt_cntvoff_write,
4926      .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
4927    { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
4928      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
4929      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4930      .type = ARM_CP_IO, .access = PL2_RW,
4931      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4932    { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
4933      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
4934      .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
4935      .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
4936    { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
4937      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
4938      .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
4939      .resetfn = gt_hyp_timer_reset,
4940      .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
4941    { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
4942      .type = ARM_CP_IO,
4943      .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
4944      .access = PL2_RW,
4945      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
4946      .resetvalue = 0,
4947      .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
4948#endif
4949    /* The only field of MDCR_EL2 that has a defined architectural reset value
4950     * is MDCR_EL2.HPMN, which should reset to the value of PMCR_EL0.N; but we
4951     * don't implement any PMU event counters, so using zero as a reset
4952     * value for MDCR_EL2 is okay.
4953     */
4954    { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
4955      .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
4956      .access = PL2_RW, .resetvalue = 0,
4957      .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
4958    { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
4959      .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4960      .access = PL2_RW, .accessfn = access_el3_aa32ns,
4961      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4962    { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
4963      .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
4964      .access = PL2_RW,
4965      .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
4966    { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
4967      .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
4968      .access = PL2_RW,
4969      .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
4970    REGINFO_SENTINEL
4971};
4972
4973static const ARMCPRegInfo el2_v8_cp_reginfo[] = {
4974    { .name = "HCR2", .state = ARM_CP_STATE_AA32,
4975      .type = ARM_CP_ALIAS | ARM_CP_IO,
4976      .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4,
4977      .access = PL2_RW,
4978      .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2),
4979      .writefn = hcr_writehigh },
4980    REGINFO_SENTINEL
4981};
4982
4983static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
4984                                   bool isread)
4985{
4986    /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
4987     * At Secure EL1 it traps to EL3.
4988     */
4989    if (arm_current_el(env) == 3) {
4990        return CP_ACCESS_OK;
4991    }
4992    if (arm_is_secure_below_el3(env)) {
4993        return CP_ACCESS_TRAP_EL3;
4994    }
4995    /* Accesses from NS EL1 and NS EL2 are UNDEF for writes, but reads are allowed. */
4996    if (isread) {
4997        return CP_ACCESS_OK;
4998    }
4999    return CP_ACCESS_TRAP_UNCATEGORIZED;
5000}
5001
5002static const ARMCPRegInfo el3_cp_reginfo[] = {
5003    { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
5004      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
5005      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
5006      .resetvalue = 0, .writefn = scr_write },
5007    { .name = "SCR",  .type = ARM_CP_ALIAS,
5008      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
5009      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5010      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
5011      .writefn = scr_write },
5012    { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
5013      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
5014      .access = PL3_RW, .resetvalue = 0,
5015      .fieldoffset = offsetof(CPUARMState, cp15.sder) },
5016    { .name = "SDER",
5017      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
5018      .access = PL3_RW, .resetvalue = 0,
5019      .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
5020    { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
5021      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
5022      .writefn = vbar_write, .resetvalue = 0,
5023      .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
5024    { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
5025      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
5026      .access = PL3_RW, .resetvalue = 0,
5027      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
5028    { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
5029      .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
5030      .access = PL3_RW,
5031      /* no .writefn needed as this can't cause an ASID change;
5032       * we must provide a .raw_writefn and .resetfn because we handle
5033       * reset and migration for the AArch32 TTBCR(S), which might be
5034       * using mask and base_mask.
5035       */
5036      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
5037      .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
5038    { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
5039      .type = ARM_CP_ALIAS,
5040      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
5041      .access = PL3_RW,
5042      .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
5043    { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
5044      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
5045      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
5046    { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
5047      .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
5048      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
5049    { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
5050      .type = ARM_CP_ALIAS,
5051      .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
5052      .access = PL3_RW,
5053      .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
5054    { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
5055      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
5056      .access = PL3_RW, .writefn = vbar_write,
5057      .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
5058      .resetvalue = 0 },
5059    { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
5060      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
5061      .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
5062      .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
5063    { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
5064      .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
5065      .access = PL3_RW, .resetvalue = 0,
5066      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
5067    { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
5068      .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
5069      .access = PL3_RW, .type = ARM_CP_CONST,
5070      .resetvalue = 0 },
5071    { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
5072      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
5073      .access = PL3_RW, .type = ARM_CP_CONST,
5074      .resetvalue = 0 },
5075    { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
5076      .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
5077      .access = PL3_RW, .type = ARM_CP_CONST,
5078      .resetvalue = 0 },
5079    { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
5080      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
5081      .access = PL3_W, .type = ARM_CP_NO_RAW,
5082      .writefn = tlbi_aa64_alle3is_write },
5083    { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
5084      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
5085      .access = PL3_W, .type = ARM_CP_NO_RAW,
5086      .writefn = tlbi_aa64_vae3is_write },
5087    { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
5088      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
5089      .access = PL3_W, .type = ARM_CP_NO_RAW,
5090      .writefn = tlbi_aa64_vae3is_write },
5091    { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
5092      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
5093      .access = PL3_W, .type = ARM_CP_NO_RAW,
5094      .writefn = tlbi_aa64_alle3_write },
5095    { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
5096      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
5097      .access = PL3_W, .type = ARM_CP_NO_RAW,
5098      .writefn = tlbi_aa64_vae3_write },
5099    { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
5100      .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
5101      .access = PL3_W, .type = ARM_CP_NO_RAW,
5102      .writefn = tlbi_aa64_vae3_write },
5103    REGINFO_SENTINEL
5104};
5105
5106static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
5107                                     bool isread)
5108{
5109    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
5110     * but the AArch32 CTR has its own reginfo struct)
5111     */
5112    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
5113        return CP_ACCESS_TRAP;
5114    }
5115    return CP_ACCESS_OK;
5116}
5117
5118static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
5119                        uint64_t value)
5120{
5121    /* Writes to OSLAR_EL1 may update the OS lock status, which can be
5122     * read via a bit in OSLSR_EL1.
5123     */
5124    int oslock;
5125
5126    if (ri->state == ARM_CP_STATE_AA32) {
5127        oslock = (value == 0xC5ACCE55);
5128    } else {
5129        oslock = value & 1;
5130    }
5131
5132    env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
5133}
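/*
 * Example for oslar_write() above: an AArch32 write of the key value
 * 0xC5ACCE55 to OSLAR sets the OS lock (OSLSR_EL1 bit 1 becomes 1) and any
 * other AArch32 value clears it, whereas an AArch64 write to OSLAR_EL1
 * simply uses bit 0 of the written value as the new lock state.
 */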
5134
5135static const ARMCPRegInfo debug_cp_reginfo[] = {
5136    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
5137     * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
5138     * unlike DBGDRAR it is never accessible from EL0.
5139     * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
5140     * accessor.
5141     */
5142    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
5143      .access = PL0_R, .accessfn = access_tdra,
5144      .type = ARM_CP_CONST, .resetvalue = 0 },
5145    { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
5146      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
5147      .access = PL1_R, .accessfn = access_tdra,
5148      .type = ARM_CP_CONST, .resetvalue = 0 },
5149    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
5150      .access = PL0_R, .accessfn = access_tdra,
5151      .type = ARM_CP_CONST, .resetvalue = 0 },
5152    /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
5153    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
5154      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
5155      .access = PL1_RW, .accessfn = access_tda,
5156      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
5157      .resetvalue = 0 },
5158    /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
5159     * We don't implement the configurable EL0 access.
5160     */
5161    { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
5162      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
5163      .type = ARM_CP_ALIAS,
5164      .access = PL1_R, .accessfn = access_tda,
5165      .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
5166    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
5167      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
5168      .access = PL1_W, .type = ARM_CP_NO_RAW,
5169      .accessfn = access_tdosa,
5170      .writefn = oslar_write },
5171    { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
5172      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
5173      .access = PL1_R, .resetvalue = 10,
5174      .accessfn = access_tdosa,
5175      .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
5176    /* Dummy OSDLR_EL1: 32-bit Linux will read this */
5177    { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
5178      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
5179      .access = PL1_RW, .accessfn = access_tdosa,
5180      .type = ARM_CP_NOP },
5181    /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
5182     * implement vector catch debug events yet.
5183     */
5184    { .name = "DBGVCR",
5185      .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
5186      .access = PL1_RW, .accessfn = access_tda,
5187      .type = ARM_CP_NOP },
5188    /* Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
5189     * to save and restore a 32-bit guest's DBGVCR)
5190     */
5191    { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
5192      .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
5193      .access = PL2_RW, .accessfn = access_tda,
5194      .type = ARM_CP_NOP },
5195    /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
5196     * Channel but Linux may try to access this register. The 32-bit
5197     * alias is DBGDCCINT.
5198     */
5199    { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
5200      .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
5201      .access = PL1_RW, .accessfn = access_tda,
5202      .type = ARM_CP_NOP },
5203    REGINFO_SENTINEL
5204};
5205
5206static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
5207    /* 64 bit access versions of the (dummy) debug registers */
5208    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
5209      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
5210    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
5211      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
5212    REGINFO_SENTINEL
5213};
5214
5215/* Return the exception level to which exceptions should be taken
5216 * via SVEAccessTrap.  If an exception should be routed through
5217 * AArch64.AdvSIMDFPAccessTrap, return 0; fp_exception_el should
5218 * take care of raising that exception.
5219 * C.f. the ARM pseudocode function CheckSVEEnabled.
5220 */
5221int sve_exception_el(CPUARMState *env, int el)
5222{
5223#ifndef CONFIG_USER_ONLY
5224    if (el <= 1) {
5225        bool disabled = false;
5226
5227        /* The CPACR.ZEN field controls traps to EL1:
5228         * 0, 2 : trap EL0 and EL1 accesses
5229         * 1    : trap only EL0 accesses
5230         * 3    : trap no accesses
5231         */
5232        if (!extract32(env->cp15.cpacr_el1, 16, 1)) {
5233            disabled = true;
5234        } else if (!extract32(env->cp15.cpacr_el1, 17, 1)) {
5235            disabled = el == 0;
5236        }
5237        if (disabled) {
5238            /* route_to_el2 */
5239            return (arm_feature(env, ARM_FEATURE_EL2)
5240                    && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
5241        }
5242
5243        /* Check CPACR.FPEN.  */
5244        if (!extract32(env->cp15.cpacr_el1, 20, 1)) {
5245            disabled = true;
5246        } else if (!extract32(env->cp15.cpacr_el1, 21, 1)) {
5247            disabled = el == 0;
5248        }
5249        if (disabled) {
5250            return 0;
5251        }
5252    }
5253
5254    /* CPTR_EL2.  Since TZ and TFP are positive,
5255     * they will be zero when EL2 is not present.
5256     */
5257    if (el <= 2 && !arm_is_secure_below_el3(env)) {
5258        if (env->cp15.cptr_el[2] & CPTR_TZ) {
5259            return 2;
5260        }
5261        if (env->cp15.cptr_el[2] & CPTR_TFP) {
5262            return 0;
5263        }
5264    }
5265
5266    /* CPTR_EL3.  Since EZ is negative we must check for EL3.  */
5267    if (arm_feature(env, ARM_FEATURE_EL3)
5268        && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
5269        return 3;
5270    }
5271#endif
5272    return 0;
5273}
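/*
 * Example for sve_exception_el() above: with CPACR_EL1.ZEN == 1 (bit 16
 * set, bit 17 clear), an SVE access from EL1 is not trapped by the ZEN
 * check, but the same access from EL0 returns 1 (trap to EL1), or 2 if EL2
 * is present and the effective HCR_EL2.TGE is set.
 */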
5274
5275/*
5276 * Given that SVE is enabled, return the vector length for EL.
5277 */
5278uint32_t sve_zcr_len_for_el(CPUARMState *env, int el)
5279{
5280    ARMCPU *cpu = env_archcpu(env);
5281    uint32_t zcr_len = cpu->sve_max_vq - 1;
5282
5283    if (el <= 1) {
5284        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[1]);
5285    }
5286    if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) {
5287        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[2]);
5288    }
5289    if (arm_feature(env, ARM_FEATURE_EL3)) {
5290        zcr_len = MIN(zcr_len, 0xf & (uint32_t)env->vfp.zcr_el[3]);
5291    }
5292    return zcr_len;
5293}
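/*
 * Worked example for sve_zcr_len_for_el() above (illustrative values): with
 * sve_max_vq == 16 (starting value 15), ZCR_EL1.LEN == 3, no EL2, and
 * ZCR_EL3.LEN == 7 on a CPU with EL3, the value returned for el == 1 is
 * MIN(15, 3, 7) == 3, i.e. a vector length of four 128-bit quadwords.
 */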
5294
5295static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5296                      uint64_t value)
5297{
5298    int cur_el = arm_current_el(env);
5299    int old_len = sve_zcr_len_for_el(env, cur_el);
5300    int new_len;
5301
5302    /* Bits other than [3:0] are RAZ/WI.  */
5303    raw_write(env, ri, value & 0xf);
5304
5305    /*
5306     * Because we arrived here, we know both FP and SVE are enabled;
5307     * otherwise we would have trapped access to the ZCR_ELn register.
5308     */
5309    new_len = sve_zcr_len_for_el(env, cur_el);
5310    if (new_len < old_len) {
5311        aarch64_sve_narrow_vq(env, new_len + 1);
5312    }
5313}
5314
5315static const ARMCPRegInfo zcr_el1_reginfo = {
5316    .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
5317    .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
5318    .access = PL1_RW, .type = ARM_CP_SVE,
5319    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
5320    .writefn = zcr_write, .raw_writefn = raw_write
5321};
5322
5323static const ARMCPRegInfo zcr_el2_reginfo = {
5324    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
5325    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
5326    .access = PL2_RW, .type = ARM_CP_SVE,
5327    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]),
5328    .writefn = zcr_write, .raw_writefn = raw_write
5329};
5330
5331static const ARMCPRegInfo zcr_no_el2_reginfo = {
5332    .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64,
5333    .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0,
5334    .access = PL2_RW, .type = ARM_CP_SVE,
5335    .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore
5336};
5337
5338static const ARMCPRegInfo zcr_el3_reginfo = {
5339    .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64,
5340    .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0,
5341    .access = PL3_RW, .type = ARM_CP_SVE,
5342    .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]),
5343    .writefn = zcr_write, .raw_writefn = raw_write
5344};
5345
5346void hw_watchpoint_update(ARMCPU *cpu, int n)
5347{
5348    CPUARMState *env = &cpu->env;
5349    vaddr len = 0;
5350    vaddr wvr = env->cp15.dbgwvr[n];
5351    uint64_t wcr = env->cp15.dbgwcr[n];
5352    int mask;
5353    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
5354
5355    if (env->cpu_watchpoint[n]) {
5356        cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
5357        env->cpu_watchpoint[n] = NULL;
5358    }
5359
5360    if (!extract64(wcr, 0, 1)) {
5361        /* E bit clear : watchpoint disabled */
5362        return;
5363    }
5364
5365    switch (extract64(wcr, 3, 2)) {
5366    case 0:
5367        /* LSC 00 is reserved and must behave as if the wp is disabled */
5368        return;
5369    case 1:
5370        flags |= BP_MEM_READ;
5371        break;
5372    case 2:
5373        flags |= BP_MEM_WRITE;
5374        break;
5375    case 3:
5376        flags |= BP_MEM_ACCESS;
5377        break;
5378    }
5379
5380    /* Attempts to use both MASK and BAS fields simultaneously are
5381     * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
5382     * thus generating a watchpoint for every byte in the masked region.
5383     */
5384    mask = extract64(wcr, 24, 4);
5385    if (mask == 1 || mask == 2) {
5386        /* Reserved values of MASK; we must act as if the mask value was
5387         * some non-reserved value, or as if the watchpoint were disabled.
5388         * We choose the latter.
5389         */
5390        return;
5391    } else if (mask) {
5392        /* Watchpoint covers an aligned area up to 2GB in size */
5393        len = 1ULL << mask;
5394        /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
5395         * whether the watchpoint fires when the unmasked bits match; we opt
5396         * to generate the exceptions.
5397         */
5398        wvr &= ~(len - 1);
5399    } else {
5400        /* Watchpoint covers bytes defined by the byte address select bits */
5401        int bas = extract64(wcr, 5, 8);
5402        int basstart;
5403
5404        if (bas == 0) {
5405            /* This must act as if the watchpoint is disabled */
5406            return;
5407        }
5408
5409        if (extract64(wvr, 2, 1)) {
5410            /* Deprecated case of an address that is only 4-aligned. BAS[7:4] are
5411             * ignored, and BAS[3:0] define which bytes to watch.
5412             */
5413            bas &= 0xf;
5414        }
5415        /* The BAS bits are supposed to be programmed to indicate a contiguous
5416         * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
5417         * we fire for each byte in the word/doubleword addressed by the WVR.
5418         * We choose to ignore any non-zero bits after the first range of 1s.
5419         */
5420        basstart = ctz32(bas);
5421        len = cto32(bas >> basstart);
5422        wvr += basstart;
5423    }
5424
5425    cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
5426                          &env->cpu_watchpoint[n]);
5427}
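/*
 * Worked examples for hw_watchpoint_update() above: a WCR with MASK == 4
 * watches a 16-byte region with the masked WVR bits cleared
 * (len = 1 << 4, wvr &= ~15).  A WCR with MASK == 0 and BAS == 0b00111100
 * gives basstart = 2 and len = 4, so the watchpoint covers the four bytes
 * starting at wvr + 2.
 */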
5428
5429void hw_watchpoint_update_all(ARMCPU *cpu)
5430{
5431    int i;
5432    CPUARMState *env = &cpu->env;
5433
5434    /* Completely clear out existing QEMU watchpoints and our array, to
5435     * avoid possible stale entries following migration load.
5436     */
5437    cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
5438    memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
5439
5440    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
5441        hw_watchpoint_update(cpu, i);
5442    }
5443}
5444
5445static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5446                         uint64_t value)
5447{
5448    ARMCPU *cpu = env_archcpu(env);
5449    int i = ri->crm;
5450
5451    /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
5452     * register reads back and behaves as if the written value were sign
5453     * extended from bit 48.  Bits [1:0] are RES0.
5454     */
5455    value = sextract64(value, 0, 49) & ~3ULL;
5456
5457    raw_write(env, ri, value);
5458    hw_watchpoint_update(cpu, i);
5459}
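/*
 * Example for dbgwvr_write() above (illustrative value): writing
 * 0x000123456789abcd (bit 48 set) stores 0xffff23456789abcc, because bits
 * [63:49] copy bit [48] via sextract64() and bits [1:0] are cleared by the
 * ~3 mask.
 */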
5460
5461static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5462                         uint64_t value)
5463{
5464    ARMCPU *cpu = env_archcpu(env);
5465    int i = ri->crm;
5466
5467    raw_write(env, ri, value);
5468    hw_watchpoint_update(cpu, i);
5469}
5470
5471void hw_breakpoint_update(ARMCPU *cpu, int n)
5472{
5473    CPUARMState *env = &cpu->env;
5474    uint64_t bvr = env->cp15.dbgbvr[n];
5475    uint64_t bcr = env->cp15.dbgbcr[n];
5476    vaddr addr;
5477    int bt;
5478    int flags = BP_CPU;
5479
5480    if (env->cpu_breakpoint[n]) {
5481        cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
5482        env->cpu_breakpoint[n] = NULL;
5483    }
5484
5485    if (!extract64(bcr, 0, 1)) {
5486        /* E bit clear : breakpoint disabled */
5487        return;
5488    }
5489
5490    bt = extract64(bcr, 20, 4);
5491
5492    switch (bt) {
5493    case 4: /* unlinked address mismatch (reserved if AArch64) */
5494    case 5: /* linked address mismatch (reserved if AArch64) */
5495        qemu_log_mask(LOG_UNIMP,
5496                      "arm: address mismatch breakpoint types not implemented\n");
5497        return;
5498    case 0: /* unlinked address match */
5499    case 1: /* linked address match */
5500    {
5501        /* Bits [63:49] are hardwired to the value of bit [48]; that is,
5502         * we behave as if the register was sign extended. Bits [1:0] are
5503         * RES0. The BAS field is used to allow setting breakpoints on 16
5504         * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
5505         * a bp will fire if the addresses covered by the bp and the addresses
5506         * covered by the insn overlap but the insn doesn't start at the
5507         * start of the bp address range. We choose to require the insn and
5508         * the bp to have the same address. The constraints on writing to
5509         * BAS enforced in dbgbcr_write mean we have only four cases:
5510         *  0b0000  => no breakpoint
5511         *  0b0011  => breakpoint on addr
5512         *  0b1100  => breakpoint on addr + 2
5513         *  0b1111  => breakpoint on addr
5514         * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
5515         */
5516        int bas = extract64(bcr, 5, 4);
5517        addr = sextract64(bvr, 0, 49) & ~3ULL;
5518        if (bas == 0) {
5519            return;
5520        }
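        /* BAS == 0b1100 selects the upper halfword of the 4-byte-aligned
         * region, e.g. a Thumb insn at addr + 2.
         */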
5521        if (bas == 0xc) {
5522            addr += 2;
5523        }
5524        break;
5525    }
5526    case 2: /* unlinked context ID match */
5527    case 8: /* unlinked VMID match (reserved if no EL2) */
5528    case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
5529        qemu_log_mask(LOG_UNIMP,
5530                      "arm: unlinked context breakpoint types not implemented\n");
5531        return;
5532    case 9: /* linked VMID match (reserved if no EL2) */
5533    case 11: /* linked context ID and VMID match (reserved if no EL2) */
5534    case 3: /* linked context ID match */
5535    default:
5536        /* We must generate no events for linked context matches (unless
5537         * they are linked to by some other bp/wp, which is handled in
5538         * the update for that linking bp/wp). We also choose to generate
5539         * no events for reserved values.
5540         */
5541        return;
5542    }
5543
5544    cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
5545}
5546
5547void hw_breakpoint_update_all(ARMCPU *cpu)
5548{
5549    int i;
5550    CPUARMState *env = &cpu->env;
5551
5552    /* Completely clear out existing QEMU breakpoints and our array, to
5553     * avoid possible stale entries following migration load.
5554     */
5555    cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
5556    memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
5557
5558    for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
5559        hw_breakpoint_update(cpu, i);
5560    }
5561}
5562
5563static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5564                         uint64_t value)
5565{
5566    ARMCPU *cpu = env_archcpu(env);
5567    int i = ri->crm;
5568
5569    raw_write(env, ri, value);
5570    hw_breakpoint_update(cpu, i);
5571}
5572
5573static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
5574                         uint64_t value)
5575{
5576    ARMCPU *cpu = env_archcpu(env);
5577    int i = ri->crm;
5578
5579    /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
5580     * copy of BAS[0].
5581     */
5582    value = deposit64(value, 6, 1, extract64(value, 5, 1));
5583    value = deposit64(value, 8, 1, extract64(value, 7, 1));
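    /*
     * For example, an attempt to write BAS = 0b0101 is stored and read
     * back as BAS = 0b1111, since BAS[1] and BAS[3] mirror BAS[0] and
     * BAS[2].
     */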
5584
5585    raw_write(env, ri, value);
5586    hw_breakpoint_update(cpu, i);
5587}
5588
5589static void define_debug_regs(ARMCPU *cpu)
5590{
5591    /* Define v7 and v8 architectural debug registers.
5592     * These are just dummy implementations for now.
5593     */
5594    int i;
5595    int wrps, brps, ctx_cmps;
5596    ARMCPRegInfo dbgdidr = {
5597        .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
5598        .access = PL0_R, .accessfn = access_tda,
5599        .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
5600    };
5601
5602    /* Note that all these register fields hold "number of Xs minus 1". */
5603    brps = extract32(cpu->dbgdidr, 24, 4);
5604    wrps = extract32(cpu->dbgdidr, 28, 4);
5605    ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
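    /*
     * For example, a BRPs field of 5 means six breakpoints are implemented,
     * which is why the definition loops below run to brps + 1 and wrps + 1.
     */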
5606
5607    assert(ctx_cmps <= brps);
5608
5609    /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties
5610     * of the debug registers such as number of breakpoints;
5611     * check that if they both exist then they agree.
5612     */
5613    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
5614        assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
5615        assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
5616        assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
5617    }
5618
5619    define_one_arm_cp_reg(cpu, &dbgdidr);
5620    define_arm_cp_regs(cpu, debug_cp_reginfo);
5621
5622    if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
5623        define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
5624    }
5625
5626    for (i = 0; i < brps + 1; i++) {
5627        ARMCPRegInfo dbgregs[] = {
5628            { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
5629              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
5630              .access = PL1_RW, .accessfn = access_tda,
5631              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
5632              .writefn = dbgbvr_write, .raw_writefn = raw_write
5633            },
5634            { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
5635              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
5636              .access = PL1_RW, .accessfn = access_tda,
5637              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
5638              .writefn = dbgbcr_write, .raw_writefn = raw_write
5639            },
5640            REGINFO_SENTINEL
5641        };
5642        define_arm_cp_regs(cpu, dbgregs);
5643    }
5644
5645    for (i = 0; i < wrps + 1; i++) {
5646        ARMCPRegInfo dbgregs[] = {
5647            { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
5648              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
5649              .access = PL1_RW, .accessfn = access_tda,
5650              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
5651              .writefn = dbgwvr_write, .raw_writefn = raw_write
5652            },
5653            { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
5654              .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
5655              .access = PL1_RW, .accessfn = access_tda,
5656              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
5657              .writefn = dbgwcr_write, .raw_writefn = raw_write
5658            },
5659            REGINFO_SENTINEL
5660        };
5661        define_arm_cp_regs(cpu, dbgregs);
5662    }
5663}
5664
5665/* We don't know until after realize whether there's a GICv3
5666 * attached, and that is what registers the gicv3 sysregs.
5667 * So we have to fill in the GIC fields of ID_PFR1/ID_PFR1_EL1 and
5668 * ID_AA64PFR0_EL1 at runtime.
5669 */
5670static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
5671{
5672    ARMCPU *cpu = env_archcpu(env);
5673    uint64_t pfr1 = cpu->id_pfr1;
5674
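    /* ID_PFR1.GIC is bits [31:28]; the value 1 indicates that the GICv3
     * system register interface is supported.
     */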
5675    if (env->gicv3state) {
5676        pfr1 |= 1 << 28;
5677    }
5678    return pfr1;
5679}
5680
5681static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
5682{
5683    ARMCPU *cpu = env_archcpu(env);
5684    uint64_t pfr0 = cpu->isar.id_aa64pfr0;
5685
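    /* ID_AA64PFR0_EL1.GIC is bits [27:24]; the value 1 indicates that the
     * GICv3 system register interface is supported.
     */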
5686    if (env->gicv3state) {
5687        pfr0 |= 1 << 24;
5688    }
5689    return pfr0;
5690}
5691
5692/* Shared logic between LORID and the rest of the LOR* registers.
5693 * Secure state has already been dealt with.
5694 */
5695static CPAccessResult access_lor_ns(CPUARMState *env)
5696{
5697    int el = arm_current_el(env);
5698
5699    if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
5700        return CP_ACCESS_TRAP_EL2;
5701    }
5702    if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
5703        return CP_ACCESS_TRAP_EL3;
5704    }
5705    return CP_ACCESS_OK;
5706}
5707
5708static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
5709                                   bool isread)
5710{
5711    if (arm_is_secure_below_el3(env)) {
5712        /* Access ok in secure mode.  */
5713        return CP_ACCESS_OK;
5714    }
5715    return access_lor_ns(env);
5716}
5717
5718static CPAccessResult access_lor_other(CPUARMState *env,
5719                                       const ARMCPRegInfo *ri, bool isread)
5720{
5721    if (arm_is_secure_below_el3(env)) {
5722        /* Access denied in secure mode.  */
5723        return CP_ACCESS_TRAP;
5724    }
5725    return access_lor_ns(env);
5726}
5727
5728#ifdef TARGET_AARCH64
5729static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
5730                                   bool isread)
5731{
5732    int el = arm_current_el(env);
5733
5734    if (el < 2 &&
5735        arm_feature(env, ARM_FEATURE_EL2) &&
5736        !(arm_hcr_el2_eff(env) & HCR_APK)) {
5737        return CP_ACCESS_TRAP_EL2;
5738    }
5739    if (el < 3 &&
5740        arm_feature(env, ARM_FEATURE_EL3) &&
5741        !(env->cp15.scr_el3 & SCR_APK)) {
5742        return CP_ACCESS_TRAP_EL3;
5743    }
5744    return CP_ACCESS_OK;
5745}
5746
5747static const ARMCPRegInfo pauth_reginfo[] = {
5748    { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5749      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0,
5750      .access = PL1_RW, .accessfn = access_pauth,
5751      .fieldoffset = offsetof(CPUARMState, keys.apda.lo) },
5752    { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5753      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1,
5754      .access = PL1_RW, .accessfn = access_pauth,
5755      .fieldoffset = offsetof(CPUARMState, keys.apda.hi) },
5756    { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5757      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2,
5758      .access = PL1_RW, .accessfn = access_pauth,
5759      .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) },
5760    { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5761      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3,
5762      .access = PL1_RW, .accessfn = access_pauth,
5763      .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) },
5764    { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5765      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0,
5766      .access = PL1_RW, .accessfn = access_pauth,
5767      .fieldoffset = offsetof(CPUARMState, keys.apga.lo) },
5768    { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5769      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1,
5770      .access = PL1_RW, .accessfn = access_pauth,
5771      .fieldoffset = offsetof(CPUARMState, keys.apga.hi) },
5772    { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5773      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0,
5774      .access = PL1_RW, .accessfn = access_pauth,
5775      .fieldoffset = offsetof(CPUARMState, keys.apia.lo) },
5776    { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5777      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1,
5778      .access = PL1_RW, .accessfn = access_pauth,
5779      .fieldoffset = offsetof(CPUARMState, keys.apia.hi) },
5780    { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64,
5781      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2,
5782      .access = PL1_RW, .accessfn = access_pauth,
5783      .fieldoffset = offsetof(CPUARMState, keys.apib.lo) },
5784    { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64,
5785      .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3,
5786      .access = PL1_RW, .accessfn = access_pauth,
5787      .fieldoffset = offsetof(CPUARMState, keys.apib.hi) },
5788    REGINFO_SENTINEL
5789};
5790
5791static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
5792{
5793    Error *err = NULL;
5794    uint64_t ret;
5795
5796    /* Success sets NZCV = 0000.  */
5797    env->NF = env->CF = env->VF = 0, env->ZF = 1;
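    /* Note that QEMU stores the Z flag inverted: Z is set when env->ZF == 0,
     * so ZF = 1 here leaves Z clear.
     */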
5798
5799    if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) {
5800        /*
5801         * ??? Failed, for unknown reasons in the crypto subsystem.
5802         * The best we can do is log the reason and return the
5803         * timed-out indication to the guest.  There is no reason
5804         * we know to expect this failure to be transitory, so the
5805         * guest may well hang retrying the operation.
5806         */
5807        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
5808                      ri->name, error_get_pretty(err));
5809        error_free(err);
5810
5811        env->ZF = 0; /* NZCV = 0100 */
5812        return 0;
5813    }
5814    return ret;
5815}
5816
5817/* We do not support re-seeding, so the two registers operate the same.  */
5818static const ARMCPRegInfo rndr_reginfo[] = {
5819    { .name = "RNDR", .state = ARM_CP_STATE_AA64,
5820      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
5821      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0,
5822      .access = PL0_R, .readfn = rndr_readfn },
5823    { .name = "RNDRRS", .state = ARM_CP_STATE_AA64,
5824      .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO,
5825      .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1,
5826      .access = PL0_R, .readfn = rndr_readfn },
5827    REGINFO_SENTINEL
5828};
5829#endif
5830
5831static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
5832                                     bool isread)
5833{
5834    int el = arm_current_el(env);
5835
5836    if (el == 0) {
5837        uint64_t sctlr = arm_sctlr(env, el);
5838        if (!(sctlr & SCTLR_EnRCTX)) {
5839            return CP_ACCESS_TRAP;
5840        }
5841    } else if (el == 1) {
5842        uint64_t hcr = arm_hcr_el2_eff(env);
5843        if (hcr & HCR_NV) {
5844            return CP_ACCESS_TRAP_EL2;
5845        }
5846    }
5847    return CP_ACCESS_OK;
5848}
5849
5850static const ARMCPRegInfo predinv_reginfo[] = {
5851    { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64,
5852      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4,
5853      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5854    { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64,
5855      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5,
5856      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5857    { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64,
5858      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7,
5859      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5860    /*
5861     * Note the AArch32 opcodes have a different OPC1.
5862     */
5863    { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32,
5864      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4,
5865      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5866    { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32,
5867      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5,
5868      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5869    { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32,
5870      .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7,
5871      .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv },
5872    REGINFO_SENTINEL
5873};
5874
5875void register_cp_regs_for_features(ARMCPU *cpu)
5876{
5877    /* Register all the coprocessor registers based on feature bits */
5878    CPUARMState *env = &cpu->env;
5879    if (arm_feature(env, ARM_FEATURE_M)) {
5880        /* M profile has no coprocessor registers */
5881        return;
5882    }
5883
5884    define_arm_cp_regs(cpu, cp_reginfo);
5885    if (!arm_feature(env, ARM_FEATURE_V8)) {
5886        /* Must go early as it is full of wildcards that may be
5887         * overridden by later definitions.
5888         */
5889        define_arm_cp_regs(cpu, not_v8_cp_reginfo);
5890    }
5891
5892    if (arm_feature(env, ARM_FEATURE_V6)) {
5893        /* The ID registers all have impdef reset values */
5894        ARMCPRegInfo v6_idregs[] = {
5895            { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
5896              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
5897              .access = PL1_R, .type = ARM_CP_CONST,
5898              .resetvalue = cpu->id_pfr0 },
5899            /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
5900             * the value of the GIC field until after we define these regs.
5901             */
5902            { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
5903              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
5904              .access = PL1_R, .type = ARM_CP_NO_RAW,
5905              .readfn = id_pfr1_read,
5906              .writefn = arm_cp_write_ignore },
5907            { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
5908              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
5909              .access = PL1_R, .type = ARM_CP_CONST,
5910              .resetvalue = cpu->id_dfr0 },
5911            { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
5912              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
5913              .access = PL1_R, .type = ARM_CP_CONST,
5914              .resetvalue = cpu->id_afr0 },
5915            { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
5916              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
5917              .access = PL1_R, .type = ARM_CP_CONST,
5918              .resetvalue = cpu->id_mmfr0 },
5919            { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
5920              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
5921              .access = PL1_R, .type = ARM_CP_CONST,
5922              .resetvalue = cpu->id_mmfr1 },
5923            { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
5924              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
5925              .access = PL1_R, .type = ARM_CP_CONST,
5926              .resetvalue = cpu->id_mmfr2 },
5927            { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
5928              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
5929              .access = PL1_R, .type = ARM_CP_CONST,
5930              .resetvalue = cpu->id_mmfr3 },
5931            { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
5932              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
5933              .access = PL1_R, .type = ARM_CP_CONST,
5934              .resetvalue = cpu->isar.id_isar0 },
5935            { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
5936              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
5937              .access = PL1_R, .type = ARM_CP_CONST,
5938              .resetvalue = cpu->isar.id_isar1 },
5939            { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
5940              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
5941              .access = PL1_R, .type = ARM_CP_CONST,
5942              .resetvalue = cpu->isar.id_isar2 },
5943            { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
5944              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
5945              .access = PL1_R, .type = ARM_CP_CONST,
5946              .resetvalue = cpu->isar.id_isar3 },
5947            { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
5948              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
5949              .access = PL1_R, .type = ARM_CP_CONST,
5950              .resetvalue = cpu->isar.id_isar4 },
5951            { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
5952              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
5953              .access = PL1_R, .type = ARM_CP_CONST,
5954              .resetvalue = cpu->isar.id_isar5 },
5955            { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
5956              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
5957              .access = PL1_R, .type = ARM_CP_CONST,
5958              .resetvalue = cpu->id_mmfr4 },
5959            { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
5960              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
5961              .access = PL1_R, .type = ARM_CP_CONST,
5962              .resetvalue = cpu->isar.id_isar6 },
5963            REGINFO_SENTINEL
5964        };
5965        define_arm_cp_regs(cpu, v6_idregs);
5966        define_arm_cp_regs(cpu, v6_cp_reginfo);
5967    } else {
5968        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
5969    }
5970    if (arm_feature(env, ARM_FEATURE_V6K)) {
5971        define_arm_cp_regs(cpu, v6k_cp_reginfo);
5972    }
5973    if (arm_feature(env, ARM_FEATURE_V7MP) &&
5974        !arm_feature(env, ARM_FEATURE_PMSA)) {
5975        define_arm_cp_regs(cpu, v7mp_cp_reginfo);
5976    }
5977    if (arm_feature(env, ARM_FEATURE_V7VE)) {
5978        define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
5979    }
5980    if (arm_feature(env, ARM_FEATURE_V7)) {
5981        /* v7 performance monitor control register: same implementor
5982         * field as main ID register, and we implement four counters in
5983         * addition to the cycle count register.
5984         */
5985        unsigned int i, pmcrn = 4;
5986        ARMCPRegInfo pmcr = {
5987            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
5988            .access = PL0_RW,
5989            .type = ARM_CP_IO | ARM_CP_ALIAS,
5990            .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
5991            .accessfn = pmreg_access, .writefn = pmcr_write,
5992            .raw_writefn = raw_write,
5993        };
5994        ARMCPRegInfo pmcr64 = {
5995            .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
5996            .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
5997            .access = PL0_RW, .accessfn = pmreg_access,
5998            .type = ARM_CP_IO,
5999            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
6000            .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT),
6001            .writefn = pmcr_write, .raw_writefn = raw_write,
6002        };
6003        define_one_arm_cp_reg(cpu, &pmcr);
6004        define_one_arm_cp_reg(cpu, &pmcr64);
6005        for (i = 0; i < pmcrn; i++) {
6006            char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
6007            char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
6008            char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
6009            char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
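            /* Event counter n is encoded at crm = 8 + n / 8 (12 + n / 8 for
             * the TYPER registers) with opc2 = n % 8.
             */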
6010            ARMCPRegInfo pmev_regs[] = {
6011                { .name = pmevcntr_name, .cp = 15, .crn = 14,
6012                  .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6013                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6014                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6015                  .accessfn = pmreg_access },
6016                { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
6017                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
6018                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6019                  .type = ARM_CP_IO,
6020                  .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
6021                  .raw_readfn = pmevcntr_rawread,
6022                  .raw_writefn = pmevcntr_rawwrite },
6023                { .name = pmevtyper_name, .cp = 15, .crn = 14,
6024                  .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
6025                  .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
6026                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6027                  .accessfn = pmreg_access },
6028                { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
6029                  .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
6030                  .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
6031                  .type = ARM_CP_IO,
6032                  .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
6033                  .raw_writefn = pmevtyper_rawwrite },
6034                REGINFO_SENTINEL
6035            };
6036            define_arm_cp_regs(cpu, pmev_regs);
6037            g_free(pmevcntr_name);
6038            g_free(pmevcntr_el0_name);
6039            g_free(pmevtyper_name);
6040            g_free(pmevtyper_el0_name);
6041        }
6042        ARMCPRegInfo clidr = {
6043            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
6044            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
6045            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
6046        };
6047        define_one_arm_cp_reg(cpu, &clidr);
6048        define_arm_cp_regs(cpu, v7_cp_reginfo);
6049        define_debug_regs(cpu);
6050    } else {
6051        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
6052    }
6053    if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
6054            FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) {
6055        ARMCPRegInfo v81_pmu_regs[] = {
6056            { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
6057              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
6058              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6059              .resetvalue = extract64(cpu->pmceid0, 32, 32) },
6060            { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
6061              .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
6062              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6063              .resetvalue = extract64(cpu->pmceid1, 32, 32) },
6064            REGINFO_SENTINEL
6065        };
6066        define_arm_cp_regs(cpu, v81_pmu_regs);
6067    }
6068    if (arm_feature(env, ARM_FEATURE_V8)) {
6069        /* AArch64 ID registers, which all have impdef reset values.
6070         * Note that within the ID register ranges the unused slots
6071         * must all RAZ, not UNDEF; future architecture versions may
6072         * define new registers here.
6073         */
6074        ARMCPRegInfo v8_idregs[] = {
6075            /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
6076             * know the right value for the GIC field until after we
6077             * define these regs.
6078             */
6079            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
6080              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
6081              .access = PL1_R, .type = ARM_CP_NO_RAW,
6082              .readfn = id_aa64pfr0_read,
6083              .writefn = arm_cp_write_ignore },
6084            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
6085              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
6086              .access = PL1_R, .type = ARM_CP_CONST,
6087              .resetvalue = cpu->isar.id_aa64pfr1},
6088            { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6089              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
6090              .access = PL1_R, .type = ARM_CP_CONST,
6091              .resetvalue = 0 },
6092            { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6093              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
6094              .access = PL1_R, .type = ARM_CP_CONST,
6095              .resetvalue = 0 },
6096            { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64,
6097              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
6098              .access = PL1_R, .type = ARM_CP_CONST,
6099              /* At present, only SVEver == 0 is defined anyway.  */
6100              .resetvalue = 0 },
6101            { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6102              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
6103              .access = PL1_R, .type = ARM_CP_CONST,
6104              .resetvalue = 0 },
6105            { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6106              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
6107              .access = PL1_R, .type = ARM_CP_CONST,
6108              .resetvalue = 0 },
6109            { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6110              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
6111              .access = PL1_R, .type = ARM_CP_CONST,
6112              .resetvalue = 0 },
6113            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
6114              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
6115              .access = PL1_R, .type = ARM_CP_CONST,
6116              .resetvalue = cpu->id_aa64dfr0 },
6117            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
6118              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
6119              .access = PL1_R, .type = ARM_CP_CONST,
6120              .resetvalue = cpu->id_aa64dfr1 },
6121            { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6122              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
6123              .access = PL1_R, .type = ARM_CP_CONST,
6124              .resetvalue = 0 },
6125            { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6126              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
6127              .access = PL1_R, .type = ARM_CP_CONST,
6128              .resetvalue = 0 },
6129            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
6130              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
6131              .access = PL1_R, .type = ARM_CP_CONST,
6132              .resetvalue = cpu->id_aa64afr0 },
6133            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
6134              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
6135              .access = PL1_R, .type = ARM_CP_CONST,
6136              .resetvalue = cpu->id_aa64afr1 },
6137            { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6138              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
6139              .access = PL1_R, .type = ARM_CP_CONST,
6140              .resetvalue = 0 },
6141            { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6142              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
6143              .access = PL1_R, .type = ARM_CP_CONST,
6144              .resetvalue = 0 },
6145            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
6146              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
6147              .access = PL1_R, .type = ARM_CP_CONST,
6148              .resetvalue = cpu->isar.id_aa64isar0 },
6149            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
6150              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
6151              .access = PL1_R, .type = ARM_CP_CONST,
6152              .resetvalue = cpu->isar.id_aa64isar1 },
6153            { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6154              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
6155              .access = PL1_R, .type = ARM_CP_CONST,
6156              .resetvalue = 0 },
6157            { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6158              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
6159              .access = PL1_R, .type = ARM_CP_CONST,
6160              .resetvalue = 0 },
6161            { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6162              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
6163              .access = PL1_R, .type = ARM_CP_CONST,
6164              .resetvalue = 0 },
6165            { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6166              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
6167              .access = PL1_R, .type = ARM_CP_CONST,
6168              .resetvalue = 0 },
6169            { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6170              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
6171              .access = PL1_R, .type = ARM_CP_CONST,
6172              .resetvalue = 0 },
6173            { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6174              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
6175              .access = PL1_R, .type = ARM_CP_CONST,
6176              .resetvalue = 0 },
6177            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
6178              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
6179              .access = PL1_R, .type = ARM_CP_CONST,
6180              .resetvalue = cpu->isar.id_aa64mmfr0 },
6181            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
6182              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
6183              .access = PL1_R, .type = ARM_CP_CONST,
6184              .resetvalue = cpu->isar.id_aa64mmfr1 },
6185            { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6186              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
6187              .access = PL1_R, .type = ARM_CP_CONST,
6188              .resetvalue = 0 },
6189            { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6190              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
6191              .access = PL1_R, .type = ARM_CP_CONST,
6192              .resetvalue = 0 },
6193            { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6194              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
6195              .access = PL1_R, .type = ARM_CP_CONST,
6196              .resetvalue = 0 },
6197            { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6198              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
6199              .access = PL1_R, .type = ARM_CP_CONST,
6200              .resetvalue = 0 },
6201            { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6202              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
6203              .access = PL1_R, .type = ARM_CP_CONST,
6204              .resetvalue = 0 },
6205            { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6206              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
6207              .access = PL1_R, .type = ARM_CP_CONST,
6208              .resetvalue = 0 },
6209            { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
6210              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
6211              .access = PL1_R, .type = ARM_CP_CONST,
6212              .resetvalue = cpu->isar.mvfr0 },
6213            { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
6214              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
6215              .access = PL1_R, .type = ARM_CP_CONST,
6216              .resetvalue = cpu->isar.mvfr1 },
6217            { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
6218              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
6219              .access = PL1_R, .type = ARM_CP_CONST,
6220              .resetvalue = cpu->isar.mvfr2 },
6221            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6222              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
6223              .access = PL1_R, .type = ARM_CP_CONST,
6224              .resetvalue = 0 },
6225            { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6226              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
6227              .access = PL1_R, .type = ARM_CP_CONST,
6228              .resetvalue = 0 },
6229            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6230              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
6231              .access = PL1_R, .type = ARM_CP_CONST,
6232              .resetvalue = 0 },
6233            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6234              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
6235              .access = PL1_R, .type = ARM_CP_CONST,
6236              .resetvalue = 0 },
6237            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
6238              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
6239              .access = PL1_R, .type = ARM_CP_CONST,
6240              .resetvalue = 0 },
6241            { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
6242              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
6243              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6244              .resetvalue = extract64(cpu->pmceid0, 0, 32) },
6245            { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
6246              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
6247              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6248              .resetvalue = cpu->pmceid0 },
6249            { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
6250              .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
6251              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6252              .resetvalue = extract64(cpu->pmceid1, 0, 32) },
6253            { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
6254              .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
6255              .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
6256              .resetvalue = cpu->pmceid1 },
6257            REGINFO_SENTINEL
6258        };
6259#ifdef CONFIG_USER_ONLY
6260        ARMCPRegUserSpaceInfo v8_user_idregs[] = {
6261            { .name = "ID_AA64PFR0_EL1",
6262              .exported_bits = 0x000f000f00ff0000,
6263              .fixed_bits    = 0x0000000000000011 },
6264            { .name = "ID_AA64PFR1_EL1",
6265              .exported_bits = 0x00000000000000f0 },
6266            { .name = "ID_AA64PFR*_EL1_RESERVED",
6267              .is_glob = true                     },
6268            { .name = "ID_AA64ZFR0_EL1"           },
6269            { .name = "ID_AA64MMFR0_EL1",
6270              .fixed_bits    = 0x00000000ff000000 },
6271            { .name = "ID_AA64MMFR1_EL1"          },
6272            { .name = "ID_AA64MMFR*_EL1_RESERVED",
6273              .is_glob = true                     },
6274            { .name = "ID_AA64DFR0_EL1",
6275              .fixed_bits    = 0x0000000000000006 },
6276            { .name = "ID_AA64DFR1_EL1"           },
6277            { .name = "ID_AA64DFR*_EL1_RESERVED",
6278              .is_glob = true                     },
6279            { .name = "ID_AA64AFR*",
6280              .is_glob = true                     },
6281            { .name = "ID_AA64ISAR0_EL1",
6282              .exported_bits = 0x00fffffff0fffff0 },
6283            { .name = "ID_AA64ISAR1_EL1",
6284              .exported_bits = 0x000000f0ffffffff },
6285            { .name = "ID_AA64ISAR*_EL1_RESERVED",
6286              .is_glob = true                     },
6287            REGUSERINFO_SENTINEL
6288        };
6289        modify_arm_cp_regs(v8_idregs, v8_user_idregs);
6290#endif
6291        /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
6292        if (!arm_feature(env, ARM_FEATURE_EL3) &&
6293            !arm_feature(env, ARM_FEATURE_EL2)) {
6294            ARMCPRegInfo rvbar = {
6295                .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
6296                .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
6297                .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
6298            };
6299            define_one_arm_cp_reg(cpu, &rvbar);
6300        }
6301        define_arm_cp_regs(cpu, v8_idregs);
6302        define_arm_cp_regs(cpu, v8_cp_reginfo);
6303    }
6304    if (arm_feature(env, ARM_FEATURE_EL2)) {
6305        uint64_t vmpidr_def = mpidr_read_val(env);
6306        ARMCPRegInfo vpidr_regs[] = {
6307            { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
6308              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6309              .access = PL2_RW, .accessfn = access_el3_aa32ns,
6310              .resetvalue = cpu->midr, .type = ARM_CP_ALIAS,
6311              .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) },
6312            { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
6313              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6314              .access = PL2_RW, .resetvalue = cpu->midr,
6315              .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
6316            { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
6317              .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6318              .access = PL2_RW, .accessfn = access_el3_aa32ns,
6319              .resetvalue = vmpidr_def, .type = ARM_CP_ALIAS,
6320              .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) },
6321            { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
6322              .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6323              .access = PL2_RW,
6324              .resetvalue = vmpidr_def,
6325              .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
6326            REGINFO_SENTINEL
6327        };
6328        define_arm_cp_regs(cpu, vpidr_regs);
6329        define_arm_cp_regs(cpu, el2_cp_reginfo);
6330        if (arm_feature(env, ARM_FEATURE_V8)) {
6331            define_arm_cp_regs(cpu, el2_v8_cp_reginfo);
6332        }
6333        /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
6334        if (!arm_feature(env, ARM_FEATURE_EL3)) {
6335            ARMCPRegInfo rvbar = {
6336                .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
6337                .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
6338                .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
6339            };
6340            define_one_arm_cp_reg(cpu, &rvbar);
6341        }
6342    } else {
6343        /* If EL2 is missing but higher ELs are enabled, we need to
6344         * register the no_el2 reginfos.
6345         */
6346        if (arm_feature(env, ARM_FEATURE_EL3)) {
6347            /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
6348             * of MIDR_EL1 and MPIDR_EL1.
6349             */
6350            ARMCPRegInfo vpidr_regs[] = {
6351                { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6352                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
6353                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
6354                  .type = ARM_CP_CONST, .resetvalue = cpu->midr,
6355                  .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
6356                { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
6357                  .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
6358                  .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
6359                  .type = ARM_CP_NO_RAW,
6360                  .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
6361                REGINFO_SENTINEL
6362            };
6363            define_arm_cp_regs(cpu, vpidr_regs);
6364            define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
6365            if (arm_feature(env, ARM_FEATURE_V8)) {
6366                define_arm_cp_regs(cpu, el3_no_el2_v8_cp_reginfo);
6367            }
6368        }
6369    }
6370    if (arm_feature(env, ARM_FEATURE_EL3)) {
6371        define_arm_cp_regs(cpu, el3_cp_reginfo);
6372        ARMCPRegInfo el3_regs[] = {
6373            { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
6374              .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
6375              .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
6376            { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
6377              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
6378              .access = PL3_RW,
6379              .raw_writefn = raw_write, .writefn = sctlr_write,
6380              .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
6381              .resetvalue = cpu->reset_sctlr },
6382            REGINFO_SENTINEL
6383        };
6384
6385        define_arm_cp_regs(cpu, el3_regs);
6386    }
6387    /* The behaviour of NSACR is sufficiently various that we don't
6388     * try to describe it in a single reginfo:
6389     *  if EL3 is 64 bit, then trap to EL3 from S EL1,
6390     *     reads as constant 0xc00 from NS EL1 and NS EL2
6391     *  if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
6392     *  if v7 without EL3, register doesn't exist
6393     *  if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
6394     */
6395    if (arm_feature(env, ARM_FEATURE_EL3)) {
6396        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6397            ARMCPRegInfo nsacr = {
6398                .name = "NSACR", .type = ARM_CP_CONST,
6399                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6400                .access = PL1_RW, .accessfn = nsacr_access,
6401                .resetvalue = 0xc00
6402            };
6403            define_one_arm_cp_reg(cpu, &nsacr);
6404        } else {
6405            ARMCPRegInfo nsacr = {
6406                .name = "NSACR",
6407                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6408                .access = PL3_RW | PL1_R,
6409                .resetvalue = 0,
6410                .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
6411            };
6412            define_one_arm_cp_reg(cpu, &nsacr);
6413        }
6414    } else {
6415        if (arm_feature(env, ARM_FEATURE_V8)) {
6416            ARMCPRegInfo nsacr = {
6417                .name = "NSACR", .type = ARM_CP_CONST,
6418                .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
6419                .access = PL1_R,
6420                .resetvalue = 0xc00
6421            };
6422            define_one_arm_cp_reg(cpu, &nsacr);
6423        }
6424    }
6425
6426    if (arm_feature(env, ARM_FEATURE_PMSA)) {
6427        if (arm_feature(env, ARM_FEATURE_V6)) {
6428            /* PMSAv6 not implemented */
6429            assert(arm_feature(env, ARM_FEATURE_V7));
6430            define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6431            define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
6432        } else {
6433            define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
6434        }
6435    } else {
6436        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
6437        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
6438        /* TTBCR2 is introduced with ARMv8.2-AA32HPD.  */
6439        if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) {
6440            define_one_arm_cp_reg(cpu, &ttbcr2_reginfo);
6441        }
6442    }
6443    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
6444        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
6445    }
6446    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
6447        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
6448    }
6449    if (arm_feature(env, ARM_FEATURE_VAPA)) {
6450        define_arm_cp_regs(cpu, vapa_cp_reginfo);
6451    }
6452    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
6453        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
6454    }
6455    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
6456        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
6457    }
6458    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
6459        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
6460    }
6461    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
6462        define_arm_cp_regs(cpu, omap_cp_reginfo);
6463    }
6464    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
6465        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
6466    }
6467    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6468        define_arm_cp_regs(cpu, xscale_cp_reginfo);
6469    }
6470    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
6471        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
6472    }
6473    if (arm_feature(env, ARM_FEATURE_LPAE)) {
6474        define_arm_cp_regs(cpu, lpae_cp_reginfo);
6475    }
6476    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
6477     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
6478     * be read-only (i.e. a write causes an UNDEF exception).
6479     */
6480    {
6481        ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
6482            /* Pre-v8 MIDR space.
6483             * Note that the MIDR isn't a simple constant register because
6484             * of the TI925 behaviour where writes to another register can
6485             * cause the MIDR value to change.
6486             *
6487             * Unimplemented registers in the c15 0 0 0 space default to
6488             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
6489             * and friends override accordingly.
6490             */
6491            { .name = "MIDR",
6492              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
6493              .access = PL1_R, .resetvalue = cpu->midr,
6494              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
6495              .readfn = midr_read,
6496              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
6497              .type = ARM_CP_OVERRIDE },
6498            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
6499            { .name = "DUMMY",
6500              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
6501              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6502            { .name = "DUMMY",
6503              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
6504              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6505            { .name = "DUMMY",
6506              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
6507              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6508            { .name = "DUMMY",
6509              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
6510              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6511            { .name = "DUMMY",
6512              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
6513              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6514            REGINFO_SENTINEL
6515        };
6516        ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
6517            { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
6518              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
6519              .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
6520              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
6521              .readfn = midr_read },
6522            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
6523            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
6524              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
6525              .access = PL1_R, .resetvalue = cpu->midr },
6526            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
6527              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
6528              .access = PL1_R, .resetvalue = cpu->midr },
6529            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
6530              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
6531              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
6532            REGINFO_SENTINEL
6533        };
6534        ARMCPRegInfo id_cp_reginfo[] = {
6535            /* These are common to v8 and pre-v8 */
6536            { .name = "CTR",
6537              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
6538              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
6539            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
6540              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
6541              .access = PL0_R, .accessfn = ctr_el0_access,
6542              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
6543            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
6544            { .name = "TCMTR",
6545              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
6546              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
6547            REGINFO_SENTINEL
6548        };
6549        /* TLBTR is specific to VMSA */
6550        ARMCPRegInfo id_tlbtr_reginfo = {
6551              .name = "TLBTR",
6552              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
6553              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
6554        };
6555        /* MPUIR is specific to PMSA V6+ */
6556        ARMCPRegInfo id_mpuir_reginfo = {
6557              .name = "MPUIR",
6558              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
6559              .access = PL1_R, .type = ARM_CP_CONST,
6560              .resetvalue = cpu->pmsav7_dregion << 8
6561        };
6562        ARMCPRegInfo crn0_wi_reginfo = {
6563            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
6564            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
6565            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
6566        };
6567#ifdef CONFIG_USER_ONLY
6568        ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = {
6569            { .name = "MIDR_EL1",
6570              .exported_bits = 0x00000000ffffffff },
6571            { .name = "REVIDR_EL1"                },
6572            REGUSERINFO_SENTINEL
6573        };
6574        modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo);
6575#endif
6576        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
6577            arm_feature(env, ARM_FEATURE_STRONGARM)) {
6578            ARMCPRegInfo *r;
6579            /* Register the blanket "writes ignored" value first to cover the
6580             * whole space. Then update the specific ID registers to allow write
6581             * access, so that they ignore writes rather than causing them to
6582             * UNDEF.
6583             */
6584            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
6585            for (r = id_pre_v8_midr_cp_reginfo;
6586                 r->type != ARM_CP_SENTINEL; r++) {
6587                r->access = PL1_RW;
6588            }
6589            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
6590                r->access = PL1_RW;
6591            }
6592            id_mpuir_reginfo.access = PL1_RW;
6593            id_tlbtr_reginfo.access = PL1_RW;
6594        }
6595        if (arm_feature(env, ARM_FEATURE_V8)) {
6596            define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
6597        } else {
6598            define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
6599        }
6600        define_arm_cp_regs(cpu, id_cp_reginfo);
6601        if (!arm_feature(env, ARM_FEATURE_PMSA)) {
6602            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
6603        } else if (arm_feature(env, ARM_FEATURE_V7)) {
6604            define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
6605        }
6606    }
6607
6608    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
6609        ARMCPRegInfo mpidr_cp_reginfo[] = {
6610            { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH,
6611              .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
6612              .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
6613            REGINFO_SENTINEL
6614        };
6615#ifdef CONFIG_USER_ONLY
6616        ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = {
6617            { .name = "MPIDR_EL1",
6618              .fixed_bits = 0x0000000080000000 },
6619            REGUSERINFO_SENTINEL
6620        };
6621        modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo);
6622#endif
6623        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
6624    }
6625
6626    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
6627        ARMCPRegInfo auxcr_reginfo[] = {
6628            { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
6629              .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
6630              .access = PL1_RW, .type = ARM_CP_CONST,
6631              .resetvalue = cpu->reset_auxcr },
6632            { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
6633              .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
6634              .access = PL2_RW, .type = ARM_CP_CONST,
6635              .resetvalue = 0 },
6636            { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
6637              .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
6638              .access = PL3_RW, .type = ARM_CP_CONST,
6639              .resetvalue = 0 },
6640            REGINFO_SENTINEL
6641        };
6642        define_arm_cp_regs(cpu, auxcr_reginfo);
6643        if (arm_feature(env, ARM_FEATURE_V8)) {
6644            /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */
6645            ARMCPRegInfo hactlr2_reginfo = {
6646                .name = "HACTLR2", .state = ARM_CP_STATE_AA32,
6647                .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3,
6648                .access = PL2_RW, .type = ARM_CP_CONST,
6649                .resetvalue = 0
6650            };
6651            define_one_arm_cp_reg(cpu, &hactlr2_reginfo);
6652        }
6653    }
6654
6655    if (arm_feature(env, ARM_FEATURE_CBAR)) {
6656        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6657            /* 32 bit view is [31:18] 0...0 [43:32]. */
6658            uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
6659                | extract64(cpu->reset_cbar, 32, 12);
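            /*
             * Worked example (hypothetical reset_cbar value, for illustration
             * only): 0x0000004120000000 has 0x412 in bits [43:32] and 0x800
             * in bits [31:18], so the 32-bit view computed above is
             * 0x20000412.
             */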
6660            ARMCPRegInfo cbar_reginfo[] = {
6661                { .name = "CBAR",
6662                  .type = ARM_CP_CONST,
6663                  .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
6664                  .access = PL1_R, .resetvalue = cpu->reset_cbar },
6665                { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
6666                  .type = ARM_CP_CONST,
6667                  .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
6668                  .access = PL1_R, .resetvalue = cbar32 },
6669                REGINFO_SENTINEL
6670            };
6671            /* We don't implement an r/w 64 bit CBAR currently */
6672            assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
6673            define_arm_cp_regs(cpu, cbar_reginfo);
6674        } else {
6675            ARMCPRegInfo cbar = {
6676                .name = "CBAR",
6677                .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
6678                .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
6679                .fieldoffset = offsetof(CPUARMState,
6680                                        cp15.c15_config_base_address)
6681            };
6682            if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
6683                cbar.access = PL1_R;
6684                cbar.fieldoffset = 0;
6685                cbar.type = ARM_CP_CONST;
6686            }
6687            define_one_arm_cp_reg(cpu, &cbar);
6688        }
6689    }
6690
6691    if (arm_feature(env, ARM_FEATURE_VBAR)) {
6692        ARMCPRegInfo vbar_cp_reginfo[] = {
6693            { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
6694              .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
6695              .access = PL1_RW, .writefn = vbar_write,
6696              .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
6697                                     offsetof(CPUARMState, cp15.vbar_ns) },
6698              .resetvalue = 0 },
6699            REGINFO_SENTINEL
6700        };
6701        define_arm_cp_regs(cpu, vbar_cp_reginfo);
6702    }
6703
6704    /* Generic registers whose values depend on the implementation */
6705    {
6706        ARMCPRegInfo sctlr = {
6707            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
6708            .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
6709            .access = PL1_RW,
6710            .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
6711                                   offsetof(CPUARMState, cp15.sctlr_ns) },
6712            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
6713            .raw_writefn = raw_write,
6714        };
6715        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6716            /* Normally we would always end the TB on an SCTLR write, but Linux
6717             * arch/arm/mach-pxa/sleep.S expects two instructions following
6718             * an MMU enable to execute from cache.  Imitate this behaviour.
6719             */
6720            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
6721        }
6722        define_one_arm_cp_reg(cpu, &sctlr);
6723    }
6724
6725    if (cpu_isar_feature(aa64_lor, cpu)) {
6726        /*
6727         * A trivial implementation of ARMv8.1-LOR leaves all of these
6728         * registers fixed at 0, which indicates that there are zero
6729         * supported Limited Ordering regions.
6730         */
6731        static const ARMCPRegInfo lor_reginfo[] = {
6732            { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
6733              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
6734              .access = PL1_RW, .accessfn = access_lor_other,
6735              .type = ARM_CP_CONST, .resetvalue = 0 },
6736            { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
6737              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
6738              .access = PL1_RW, .accessfn = access_lor_other,
6739              .type = ARM_CP_CONST, .resetvalue = 0 },
6740            { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
6741              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
6742              .access = PL1_RW, .accessfn = access_lor_other,
6743              .type = ARM_CP_CONST, .resetvalue = 0 },
6744            { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
6745              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
6746              .access = PL1_RW, .accessfn = access_lor_other,
6747              .type = ARM_CP_CONST, .resetvalue = 0 },
6748            { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
6749              .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
6750              .access = PL1_R, .accessfn = access_lorid,
6751              .type = ARM_CP_CONST, .resetvalue = 0 },
6752            REGINFO_SENTINEL
6753        };
6754        define_arm_cp_regs(cpu, lor_reginfo);
6755    }
6756
6757    if (cpu_isar_feature(aa64_sve, cpu)) {
6758        define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
6759        if (arm_feature(env, ARM_FEATURE_EL2)) {
6760            define_one_arm_cp_reg(cpu, &zcr_el2_reginfo);
6761        } else {
6762            define_one_arm_cp_reg(cpu, &zcr_no_el2_reginfo);
6763        }
6764        if (arm_feature(env, ARM_FEATURE_EL3)) {
6765            define_one_arm_cp_reg(cpu, &zcr_el3_reginfo);
6766        }
6767    }
6768
6769#ifdef TARGET_AARCH64
6770    if (cpu_isar_feature(aa64_pauth, cpu)) {
6771        define_arm_cp_regs(cpu, pauth_reginfo);
6772    }
6773    if (cpu_isar_feature(aa64_rndr, cpu)) {
6774        define_arm_cp_regs(cpu, rndr_reginfo);
6775    }
6776#endif
6777
6778    /*
6779     * While all v8.0 cpus support aarch64, QEMU does have configurations
6780     * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max,
6781     * which will set ID_ISAR6.
6782     */
6783    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
6784        ? cpu_isar_feature(aa64_predinv, cpu)
6785        : cpu_isar_feature(aa32_predinv, cpu)) {
6786        define_arm_cp_regs(cpu, predinv_reginfo);
6787    }
6788}
6789
6790void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
6791{
6792    CPUState *cs = CPU(cpu);
6793    CPUARMState *env = &cpu->env;
6794
6795    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
6796        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
6797                                 aarch64_fpu_gdb_set_reg,
6798                                 34, "aarch64-fpu.xml", 0);
6799    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
6800        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
6801                                 51, "arm-neon.xml", 0);
6802    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
6803        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
6804                                 35, "arm-vfp3.xml", 0);
6805    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
6806        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
6807                                 19, "arm-vfp.xml", 0);
6808    }
6809    gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg,
6810                             arm_gen_dynamic_xml(cs),
6811                             "system-registers.xml", 0);
6812}
6813
6814/* Sort alphabetically by type name, except for "any". */
6815static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
6816{
6817    ObjectClass *class_a = (ObjectClass *)a;
6818    ObjectClass *class_b = (ObjectClass *)b;
6819    const char *name_a, *name_b;
6820
6821    name_a = object_class_get_name(class_a);
6822    name_b = object_class_get_name(class_b);
6823    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
6824        return 1;
6825    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
6826        return -1;
6827    } else {
6828        return strcmp(name_a, name_b);
6829    }
6830}
6831
6832static void arm_cpu_list_entry(gpointer data, gpointer user_data)
6833{
6834    ObjectClass *oc = data;
6835    const char *typename;
6836    char *name;
6837
6838    typename = object_class_get_name(oc);
6839    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
6840    qemu_printf("  %s\n", name);
6841    g_free(name);
6842}
6843
6844void arm_cpu_list(void)
6845{
6846    GSList *list;
6847
6848    list = object_class_get_list(TYPE_ARM_CPU, false);
6849    list = g_slist_sort(list, arm_cpu_list_compare);
6850    qemu_printf("Available CPUs:\n");
6851    g_slist_foreach(list, arm_cpu_list_entry, NULL);
6852    g_slist_free(list);
6853}
6854
6855static void arm_cpu_add_definition(gpointer data, gpointer user_data)
6856{
6857    ObjectClass *oc = data;
6858    CpuDefinitionInfoList **cpu_list = user_data;
6859    CpuDefinitionInfoList *entry;
6860    CpuDefinitionInfo *info;
6861    const char *typename;
6862
6863    typename = object_class_get_name(oc);
6864    info = g_malloc0(sizeof(*info));
6865    info->name = g_strndup(typename,
6866                           strlen(typename) - strlen("-" TYPE_ARM_CPU));
6867    info->q_typename = g_strdup(typename);
6868
6869    entry = g_malloc0(sizeof(*entry));
6870    entry->value = info;
6871    entry->next = *cpu_list;
6872    *cpu_list = entry;
6873}
6874
6875CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
6876{
6877    CpuDefinitionInfoList *cpu_list = NULL;
6878    GSList *list;
6879
6880    list = object_class_get_list(TYPE_ARM_CPU, false);
6881    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
6882    g_slist_free(list);
6883
6884    return cpu_list;
6885}
6886
6887static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
6888                                   void *opaque, int state, int secstate,
6889                                   int crm, int opc1, int opc2,
6890                                   const char *name)
6891{
6892    /* Private utility function for define_one_arm_cp_reg_with_opaque():
6893     * add a single reginfo struct to the hash table.
6894     */
6895    uint32_t *key = g_new(uint32_t, 1);
6896    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
6897    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
6898    int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
6899
6900    r2->name = g_strdup(name);
6901    /* Reset the secure state to the specific incoming state.  This is
6902     * necessary as the register may have been defined with both states.
6903     */
6904    r2->secure = secstate;
6905
6906    if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
6907        /* Register is banked (using both entries in array).
6908         * Overwriting fieldoffset as the array is only used to define
6909         * banked registers but later only fieldoffset is used.
6910         */
6911        r2->fieldoffset = r->bank_fieldoffsets[ns];
6912    }
6913
6914    if (state == ARM_CP_STATE_AA32) {
6915        if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
6916            /* If the register is banked then we don't need to migrate or
6917             * reset the 32-bit instance in certain cases:
6918             *
6919             * 1) If the register has both 32-bit and 64-bit instances then we
6920             *    can count on the 64-bit instance taking care of the
6921             *    non-secure bank.
6922             * 2) If ARMv8 is enabled then we can count on a 64-bit version
6923             *    taking care of the secure bank.  This requires that separate
6924             *    32 and 64-bit definitions are provided.
6925             */
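            /*
             * SCTLR is one such case: it is defined as ARM_CP_STATE_BOTH
             * with banked field offsets, so its AArch32 non-secure instance
             * is marked ARM_CP_ALIAS here and migration of that bank is
             * left to the AArch64 SCTLR_EL1 view.
             */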
6926            if ((r->state == ARM_CP_STATE_BOTH && ns) ||
6927                (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
6928                r2->type |= ARM_CP_ALIAS;
6929            }
6930        } else if ((secstate != r->secure) && !ns) {
6931            /* The register is not banked so we only want to allow migration of
6932             * the non-secure instance.
6933             */
6934            r2->type |= ARM_CP_ALIAS;
6935        }
6936
6937        if (r->state == ARM_CP_STATE_BOTH) {
6938            /* We assume it is a cp15 register if the .cp field is left unset.
6939             */
6940            if (r2->cp == 0) {
6941                r2->cp = 15;
6942            }
6943
6944#ifdef HOST_WORDS_BIGENDIAN
6945            if (r2->fieldoffset) {
6946                r2->fieldoffset += sizeof(uint32_t);
6947            }
6948#endif
6949        }
6950    }
6951    if (state == ARM_CP_STATE_AA64) {
6952        /* To allow abbreviation of ARMCPRegInfo
6953         * definitions, we treat cp == 0 as equivalent to
6954         * the value for "standard guest-visible sysreg".
6955         * STATE_BOTH definitions are also always "standard
6956         * sysreg" in their AArch64 view (the .cp value may
6957         * be non-zero for the benefit of the AArch32 view).
6958         */
6959        if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
6960            r2->cp = CP_REG_ARM64_SYSREG_CP;
6961        }
6962        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
6963                                  r2->opc0, opc1, opc2);
6964    } else {
6965        *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
6966    }
6967    if (opaque) {
6968        r2->opaque = opaque;
6969    }
6970    /* reginfo passed to helpers is correct for the actual access,
6971     * and is never ARM_CP_STATE_BOTH:
6972     */
6973    r2->state = state;
6974    /* Make sure reginfo passed to helpers for wildcarded regs
6975     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
6976     */
6977    r2->crm = crm;
6978    r2->opc1 = opc1;
6979    r2->opc2 = opc2;
6980    /* By convention, for wildcarded registers only the first
6981     * entry is used for migration; the others are marked as
6982     * ALIAS so we don't try to transfer the register
6983     * multiple times. Special registers (ie NOP/WFI) are
6984     * never migratable and not even raw-accessible.
6985     */
6986    if ((r->type & ARM_CP_SPECIAL)) {
6987        r2->type |= ARM_CP_NO_RAW;
6988    }
6989    if (((r->crm == CP_ANY) && crm != 0) ||
6990        ((r->opc1 == CP_ANY) && opc1 != 0) ||
6991        ((r->opc2 == CP_ANY) && opc2 != 0)) {
6992        r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
6993    }
6994
6995    /* Check that raw accesses are either forbidden or handled. Note that
6996     * we can't assert this earlier because the setup of fieldoffset for
6997     * banked registers has to be done first.
6998     */
6999    if (!(r2->type & ARM_CP_NO_RAW)) {
7000        assert(!raw_accessors_invalid(r2));
7001    }
7002
7003    /* Overriding of an existing definition must be explicitly
7004     * requested.
7005     */
7006    if (!(r->type & ARM_CP_OVERRIDE)) {
7007        ARMCPRegInfo *oldreg;
7008        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
7009        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
7010            fprintf(stderr, "Register redefined: cp=%d %d bit "
7011                    "crn=%d crm=%d opc1=%d opc2=%d, "
7012                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
7013                    r2->crn, r2->crm, r2->opc1, r2->opc2,
7014                    oldreg->name, r2->name);
7015            g_assert_not_reached();
7016        }
7017    }
7018    g_hash_table_insert(cpu->cp_regs, key, r2);
7019}
7020
7021
7022void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
7023                                       const ARMCPRegInfo *r, void *opaque)
7024{
7025    /* Define implementations of coprocessor registers.
7026     * We store these in a hashtable because typically
7027     * there are fewer than 150 registers in a space which
7028     * is 16*16*16*8*8 = 262144 in size.
7029     * Wildcarding is supported for the crm, opc1 and opc2 fields.
7030     * If a register is defined twice then the second definition is
7031     * used, so this can be used to define some generic registers and
7032     * then override them with implementation specific variations.
7033     * At least one of the original and the second definition should
7034     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
7035     * against accidental use.
7036     *
7037     * The state field defines whether the register is to be
7038     * visible in the AArch32 or AArch64 execution state. If the
7039     * state is set to ARM_CP_STATE_BOTH then we synthesise a
7040     * reginfo structure for the AArch32 view, which sees the lower
7041     * 32 bits of the 64 bit register.
7042     *
7043     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
7044     * be wildcarded. AArch64 registers are always considered to be 64
7045     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
7046     * the register, if any.
7047     */
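    /*
     * Purely illustrative (hypothetical) example of wildcarding: a
     * definition along the lines of
     *
     *   { .name = "EXAMPLE", .cp = 15, .crn = 9, .opc1 = 0,
     *     .crm = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
     *     .type = ARM_CP_CONST, .resetvalue = 0 },
     *
     * expands into 16 * 8 = 128 hash table entries (one per crm/opc2
     * combination); only the crm == 0, opc2 == 0 entry is migratable, the
     * rest are marked ARM_CP_ALIAS | ARM_CP_NO_GDB by
     * add_cpreg_to_hashtable().
     */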
7048    int crm, opc1, opc2, state;
7049    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
7050    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
7051    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
7052    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
7053    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
7054    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
7055    /* 64 bit registers have only CRm and Opc1 fields */
7056    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
7057    /* op0 only exists in the AArch64 encodings */
7058    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
7059    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
7060    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
7061    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
7062     * encodes a minimum access level for the register. We roll this
7063     * runtime check into our general permission check code, so check
7064     * here that the reginfo's specified permissions are strict enough
7065     * to encompass the generic architectural permission check.
7066     */
7067    if (r->state != ARM_CP_STATE_AA32) {
7068        int mask = 0;
7069        switch (r->opc1) {
7070        case 0:
7071            /* min_EL EL1, but some accessible to EL0 via kernel ABI */
7072            mask = PL0U_R | PL1_RW;
7073            break;
7074        case 1: case 2:
7075            /* min_EL EL1 */
7076            mask = PL1_RW;
7077            break;
7078        case 3:
7079            /* min_EL EL0 */
7080            mask = PL0_RW;
7081            break;
7082        case 4:
7083            /* min_EL EL2 */
7084            mask = PL2_RW;
7085            break;
7086        case 5:
7087            /* unallocated encoding, so not possible */
7088            assert(false);
7089            break;
7090        case 6:
7091            /* min_EL EL3 */
7092            mask = PL3_RW;
7093            break;
7094        case 7:
7095            /* min_EL EL1, secure mode only (we don't check the latter) */
7096            mask = PL1_RW;
7097            break;
7098        default:
7099            /* broken reginfo with out-of-range opc1 */
7100            assert(false);
7101            break;
7102        }
7103        /* assert our permissions are not too lax (stricter is fine) */
7104        assert((r->access & ~mask) == 0);
7105    }
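    /*
     * For example, an AArch64-visible register encoded with opc1 == 3 (the
     * usual encoding for EL0-accessible registers such as CNTVCT_EL0) may
     * grant at most PL0_RW, while an opc1 == 4 register (minimum EL2) may
     * grant at most PL2_RW; anything laxer trips the assertion above.
     */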
7106
7107    /* Check that the register definition has enough info to handle
7108     * reads and writes if they are permitted.
7109     */
7110    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
7111        if (r->access & PL3_R) {
7112            assert((r->fieldoffset ||
7113                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7114                   r->readfn);
7115        }
7116        if (r->access & PL3_W) {
7117            assert((r->fieldoffset ||
7118                   (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
7119                   r->writefn);
7120        }
7121    }
7122    /* Bad type field probably means missing sentinel at end of reg list */
7123    assert(cptype_valid(r->type));
7124    for (crm = crmmin; crm <= crmmax; crm++) {
7125        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
7126            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
7127                for (state = ARM_CP_STATE_AA32;
7128                     state <= ARM_CP_STATE_AA64; state++) {
7129                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
7130                        continue;
7131                    }
7132                    if (state == ARM_CP_STATE_AA32) {
7133                        /* Under AArch32 CP registers can be common
7134                         * (same for secure and non-secure world) or banked.
7135                         */
7136                        char *name;
7137
7138                        switch (r->secure) {
7139                        case ARM_CP_SECSTATE_S:
7140                        case ARM_CP_SECSTATE_NS:
7141                            add_cpreg_to_hashtable(cpu, r, opaque, state,
7142                                                   r->secure, crm, opc1, opc2,
7143                                                   r->name);
7144                            break;
7145                        default:
7146                            name = g_strdup_printf("%s_S", r->name);
7147                            add_cpreg_to_hashtable(cpu, r, opaque, state,
7148                                                   ARM_CP_SECSTATE_S,
7149                                                   crm, opc1, opc2, name);
7150                            g_free(name);
7151                            add_cpreg_to_hashtable(cpu, r, opaque, state,
7152                                                   ARM_CP_SECSTATE_NS,
7153                                                   crm, opc1, opc2, r->name);
7154                            break;
7155                        }
7156                    } else {
7157                        /* AArch64 registers get mapped to non-secure instance
7158                         * of AArch32 */
7159                        add_cpreg_to_hashtable(cpu, r, opaque, state,
7160                                               ARM_CP_SECSTATE_NS,
7161                                               crm, opc1, opc2, r->name);
7162                    }
7163                }
7164            }
7165        }
7166    }
7167}
7168
7169void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
7170                                    const ARMCPRegInfo *regs, void *opaque)
7171{
7172    /* Define a whole list of registers */
7173    const ARMCPRegInfo *r;
7174    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
7175        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
7176    }
7177}
7178
7179/*
7180 * Modify ARMCPRegInfo for access from userspace.
7181 *
7182 * This is a data driven modification directed by
7183 * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as
7184 * user-space cannot alter any values and dynamic values pertaining to
7185 * execution state are hidden from user space view anyway.
7186 */
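/*
 * For example, the MPIDR_EL1 handling earlier in this file passes an
 * ARMCPRegUserSpaceInfo entry with .fixed_bits = 0x80000000 under
 * CONFIG_USER_ONLY, so user-mode emulation exposes MPIDR_EL1 as a
 * constant register reading back as 0x80000000.
 */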
7187void modify_arm_cp_regs(ARMCPRegInfo *regs, const ARMCPRegUserSpaceInfo *mods)
7188{
7189    const ARMCPRegUserSpaceInfo *m;
7190    ARMCPRegInfo *r;
7191
7192    for (m = mods; m->name; m++) {
7193        GPatternSpec *pat = NULL;
7194        if (m->is_glob) {
7195            pat = g_pattern_spec_new(m->name);
7196        }
7197        for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
7198            if (pat && g_pattern_match_string(pat, r->name)) {
7199                r->type = ARM_CP_CONST;
7200                r->access = PL0U_R;
7201                r->resetvalue = 0;
7202                /* continue */
7203            } else if (strcmp(r->name, m->name) == 0) {
7204                r->type = ARM_CP_CONST;
7205                r->access = PL0U_R;
7206                r->resetvalue &= m->exported_bits;
7207                r->resetvalue |= m->fixed_bits;
7208                break;
7209            }
7210        }
7211        if (pat) {
7212            g_pattern_spec_free(pat);
7213        }
7214    }
7215}
7216
7217const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
7218{
7219    return g_hash_table_lookup(cpregs, &encoded_cp);
7220}
7221
7222void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
7223                         uint64_t value)
7224{
7225    /* Helper coprocessor write function for write-ignore registers */
7226}
7227
7228uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
7229{
7230    /* Helper coprocessor read function for read-as-zero registers */
7231    return 0;
7232}
7233
7234void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
7235{
7236    /* Helper coprocessor reset function for do-nothing-on-reset registers */
7237}
7238
7239static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
7240{
7241    /* Return true if it is not valid for us to switch to
7242     * this CPU mode (ie all the UNPREDICTABLE cases in
7243     * the ARM ARM CPSRWriteByInstr pseudocode).
7244     */
7245
7246    /* Changes to or from Hyp via MSR and CPS are illegal. */
7247    if (write_type == CPSRWriteByInstr &&
7248        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
7249         mode == ARM_CPU_MODE_HYP)) {
7250        return 1;
7251    }
7252
7253    switch (mode) {
7254    case ARM_CPU_MODE_USR:
7255        return 0;
7256    case ARM_CPU_MODE_SYS:
7257    case ARM_CPU_MODE_SVC:
7258    case ARM_CPU_MODE_ABT:
7259    case ARM_CPU_MODE_UND:
7260    case ARM_CPU_MODE_IRQ:
7261    case ARM_CPU_MODE_FIQ:
7262        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
7263         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
7264         */
7265        /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
7266         * and CPS are treated as illegal mode changes.
7267         */
7268        if (write_type == CPSRWriteByInstr &&
7269            (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
7270            (arm_hcr_el2_eff(env) & HCR_TGE)) {
7271            return 1;
7272        }
7273        return 0;
7274    case ARM_CPU_MODE_HYP:
7275        return !arm_feature(env, ARM_FEATURE_EL2)
7276            || arm_current_el(env) < 2 || arm_is_secure_below_el3(env);
7277    case ARM_CPU_MODE_MON:
7278        return arm_current_el(env) < 3;
7279    default:
7280        return 1;
7281    }
7282}
7283
7284uint32_t cpsr_read(CPUARMState *env)
7285{
7286    int ZF;
7287    ZF = (env->ZF == 0);
7288    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
7289        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
7290        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
7291        | ((env->condexec_bits & 0xfc) << 8)
7292        | (env->GE << 16) | (env->daif & CPSR_AIF);
7293}
7294
7295void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
7296                CPSRWriteType write_type)
7297{
7298    uint32_t changed_daif;
7299
7300    if (mask & CPSR_NZCV) {
7301        env->ZF = (~val) & CPSR_Z;
7302        env->NF = val;
7303        env->CF = (val >> 29) & 1;
7304        env->VF = (val << 3) & 0x80000000;
7305    }
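    /*
     * Note the inverted sense of env->ZF above: it holds a value whose
     * zero-ness encodes the Z flag, so writing a CPSR value with Z set
     * stores 0 here and cpsr_read() reconstructs Z as (env->ZF == 0).
     */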
7306    if (mask & CPSR_Q)
7307        env->QF = ((val & CPSR_Q) != 0);
7308    if (mask & CPSR_T)
7309        env->thumb = ((val & CPSR_T) != 0);
7310    if (mask & CPSR_IT_0_1) {
7311        env->condexec_bits &= ~3;
7312        env->condexec_bits |= (val >> 25) & 3;
7313    }
7314    if (mask & CPSR_IT_2_7) {
7315        env->condexec_bits &= 3;
7316        env->condexec_bits |= (val >> 8) & 0xfc;
7317    }
7318    if (mask & CPSR_GE) {
7319        env->GE = (val >> 16) & 0xf;
7320    }
7321
7322    /* In a V7 implementation that includes the security extensions but does
7323     * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
7324     * whether non-secure software is allowed to change the CPSR_F and CPSR_A
7325     * bits respectively.
7326     *
7327     * In a V8 implementation, it is permitted for privileged software to
7328     * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
7329     */
7330    if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
7331        arm_feature(env, ARM_FEATURE_EL3) &&
7332        !arm_feature(env, ARM_FEATURE_EL2) &&
7333        !arm_is_secure(env)) {
7334
7335        changed_daif = (env->daif ^ val) & mask;
7336
7337        if (changed_daif & CPSR_A) {
7338            /* Check to see if we are allowed to change the masking of async
7339             * abort exceptions from a non-secure state.
7340             */
7341            if (!(env->cp15.scr_el3 & SCR_AW)) {
7342                qemu_log_mask(LOG_GUEST_ERROR,
7343                              "Ignoring attempt to switch CPSR_A flag from "
7344                              "non-secure world with SCR.AW bit clear\n");
7345                mask &= ~CPSR_A;
7346            }
7347        }
7348
7349        if (changed_daif & CPSR_F) {
7350            /* Check to see if we are allowed to change the masking of FIQ
7351             * exceptions from a non-secure state.
7352             */
7353            if (!(env->cp15.scr_el3 & SCR_FW)) {
7354                qemu_log_mask(LOG_GUEST_ERROR,
7355                              "Ignoring attempt to switch CPSR_F flag from "
7356                              "non-secure world with SCR.FW bit clear\n");
7357                mask &= ~CPSR_F;
7358            }
7359
7360            /* Check whether non-maskable FIQ (NMFI) support is enabled.
7361             * If this bit is set software is not allowed to mask
7362             * FIQs, but is allowed to set CPSR_F to 0.
7363             */
7364            if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
7365                (val & CPSR_F)) {
7366                qemu_log_mask(LOG_GUEST_ERROR,
7367                              "Ignoring attempt to enable CPSR_F flag "
7368                              "(non-maskable FIQ [NMFI] support enabled)\n");
7369                mask &= ~CPSR_F;
7370            }
7371        }
7372    }
7373
7374    env->daif &= ~(CPSR_AIF & mask);
7375    env->daif |= val & CPSR_AIF & mask;
7376
7377    if (write_type != CPSRWriteRaw &&
7378        ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
7379        if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
7380            /* Note that we can only get here in USR mode if this is a
7381             * gdb stub write; for this case we follow the architectural
7382             * behaviour for guest writes in USR mode of ignoring an attempt
7383             * to switch mode. (Those are caught by translate.c for writes
7384             * triggered by guest instructions.)
7385             */
7386            mask &= ~CPSR_M;
7387        } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
7388            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
7389             * v7, and has defined behaviour in v8:
7390             *  + leave CPSR.M untouched
7391             *  + allow changes to the other CPSR fields
7392             *  + set PSTATE.IL
7393             * For user changes via the GDB stub, we don't set PSTATE.IL,
7394             * as this would be unnecessarily harsh for a user error.
7395             */
7396            mask &= ~CPSR_M;
7397            if (write_type != CPSRWriteByGDBStub &&
7398                arm_feature(env, ARM_FEATURE_V8)) {
7399                mask |= CPSR_IL;
7400                val |= CPSR_IL;
7401            }
7402            qemu_log_mask(LOG_GUEST_ERROR,
7403                          "Illegal AArch32 mode switch attempt from %s to %s\n",
7404                          aarch32_mode_name(env->uncached_cpsr),
7405                          aarch32_mode_name(val));
7406        } else {
7407            qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n",
7408                          write_type == CPSRWriteExceptionReturn ?
7409                          "Exception return from AArch32" :
7410                          "AArch32 mode switch from",
7411                          aarch32_mode_name(env->uncached_cpsr),
7412                          aarch32_mode_name(val), env->regs[15]);
7413            switch_mode(env, val & CPSR_M);
7414        }
7415    }
7416    mask &= ~CACHED_CPSR_BITS;
7417    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
7418}
7419
7420/* Sign/zero extend */
7421uint32_t HELPER(sxtb16)(uint32_t x)
7422{
7423    uint32_t res;
7424    res = (uint16_t)(int8_t)x;
7425    res |= (uint32_t)(int8_t)(x >> 16) << 16;
7426    return res;
7427}
7428
7429uint32_t HELPER(uxtb16)(uint32_t x)
7430{
7431    uint32_t res;
7432    res = (uint16_t)(uint8_t)x;
7433    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
7434    return res;
7435}
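/*
 * Illustrative values: sxtb16(0x00800080) sign-extends the bytes in bits
 * [7:0] and [23:16] into halfwords, giving 0xff80ff80, while
 * uxtb16(0xff80ff80) zero-extends the same byte lanes, giving 0x00800080.
 */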
7436
7437int32_t HELPER(sdiv)(int32_t num, int32_t den)
7438{
7439    if (den == 0)
7440      return 0;
7441    if (num == INT_MIN && den == -1)
7442      return INT_MIN;
7443    return num / den;
7444}
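/*
 * The two special cases above follow the AArch32 SDIV definition: division
 * by zero returns 0, and the single overflowing case, 0x80000000 / -1,
 * returns 0x80000000 (the low 32 bits of the mathematically exact +2^31).
 */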
7445
7446uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
7447{
7448    if (den == 0)
7449      return 0;
7450    return num / den;
7451}
7452
7453uint32_t HELPER(rbit)(uint32_t x)
7454{
7455    return revbit32(x);
7456}
7457
7458#ifdef CONFIG_USER_ONLY
7459
7460static void switch_mode(CPUARMState *env, int mode)
7461{
7462    ARMCPU *cpu = env_archcpu(env);
7463
7464    if (mode != ARM_CPU_MODE_USR) {
7465        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
7466    }
7467}
7468
7469uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
7470                                 uint32_t cur_el, bool secure)
7471{
7472    return 1;
7473}
7474
7475void aarch64_sync_64_to_32(CPUARMState *env)
7476{
7477    g_assert_not_reached();
7478}
7479
7480#else
7481
7482static void switch_mode(CPUARMState *env, int mode)
7483{
7484    int old_mode;
7485    int i;
7486
7487    old_mode = env->uncached_cpsr & CPSR_M;
7488    if (mode == old_mode)
7489        return;
7490
7491    if (old_mode == ARM_CPU_MODE_FIQ) {
7492        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
7493        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
7494    } else if (mode == ARM_CPU_MODE_FIQ) {
7495        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
7496        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
7497    }
7498
7499    i = bank_number(old_mode);
7500    env->banked_r13[i] = env->regs[13];
7501    env->banked_spsr[i] = env->spsr;
7502
7503    i = bank_number(mode);
7504    env->regs[13] = env->banked_r13[i];
7505    env->spsr = env->banked_spsr[i];
7506
7507    env->banked_r14[r14_bank_number(old_mode)] = env->regs[14];
7508    env->regs[14] = env->banked_r14[r14_bank_number(mode)];
7509}
7510
7511/* Physical Interrupt Target EL Lookup Table
7512 *
7513 * [ From ARM ARM section G1.13.4 (Table G1-15) ]
7514 *
7515 * The below multi-dimensional table is used for looking up the target
7516 * exception level given numerous condition criteria.  Specifically, the
7517 * target EL is based on SCR and HCR routing controls as well as the
7518 * currently executing EL and secure state.
7519 *
7520 *    Dimensions:
7521 *    target_el_table[2][2][2][2][2][4]
7522 *                    |  |  |  |  |  +--- Current EL
7523 *                    |  |  |  |  +------ Non-secure(0)/Secure(1)
7524 *                    |  |  |  +--------- HCR mask override
7525 *                    |  |  +------------ SCR exec state control
7526 *                    |  +--------------- SCR mask override
7527 *                    +------------------ 32-bit(0)/64-bit(1) EL3
7528 *
7529 *    The table values are as such:
7530 *    0-3 = EL0-EL3
7531 *     -1 = Cannot occur
7532 *
7533 * The ARM ARM target EL table includes entries indicating that an "exception
7534 * is not taken".  The two cases where this is applicable are:
7535 *    1) An exception is taken from EL3 but the SCR does not have the exception
7536 *    routed to EL3.
7537 *    2) An exception is taken from EL2 but the HCR does not have the exception
7538 *    routed to EL2.
7539 * In these two cases, the table below contains a target of EL1.  This value is
7540 * returned as it is expected that the consumer of the table data will check
7541 * for "target EL >= current EL" to ensure the exception is not taken.
7542 *
7543 *            SCR     HCR
7544 *         64  EA     AMO                 From
7545 *        BIT IRQ     IMO      Non-secure         Secure
7546 *        EL3 FIQ  RW FMO   EL0 EL1 EL2 EL3   EL0 EL1 EL2 EL3
7547 */
7548static const int8_t target_el_table[2][2][2][2][2][4] = {
7549    {{{{/* 0   0   0   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
7550       {/* 0   0   0   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},
7551      {{/* 0   0   1   0 */{ 1,  1,  2, -1 },{ 3, -1, -1,  3 },},
7552       {/* 0   0   1   1 */{ 2,  2,  2, -1 },{ 3, -1, -1,  3 },},},},
7553     {{{/* 0   1   0   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
7554       {/* 0   1   0   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},
7555      {{/* 0   1   1   0 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},
7556       {/* 0   1   1   1 */{ 3,  3,  3, -1 },{ 3, -1, -1,  3 },},},},},
7557    {{{{/* 1   0   0   0 */{ 1,  1,  2, -1 },{ 1,  1, -1,  1 },},
7558       {/* 1   0   0   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},
7559      {{/* 1   0   1   0 */{ 1,  1,  1, -1 },{ 1,  1, -1,  1 },},
7560       {/* 1   0   1   1 */{ 2,  2,  2, -1 },{ 1,  1, -1,  1 },},},},
7561     {{{/* 1   1   0   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
7562       {/* 1   1   0   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},
7563      {{/* 1   1   1   0 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},
7564       {/* 1   1   1   1 */{ 3,  3,  3, -1 },{ 3,  3, -1,  3 },},},},},
7565};
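/*
 * Worked example: a physical IRQ taken from non-secure EL0 on a CPU whose
 * EL3 is AArch64, with SCR_EL3.IRQ == 0, SCR_EL3.RW == 1 and
 * HCR_EL2.IMO == 1, indexes target_el_table[1][0][1][1][0][0] == 2, i.e.
 * the IRQ is routed to EL2 because of the HCR_EL2.IMO override.
 */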
7566
7567/*
7568 * Determine the target EL for physical exceptions
7569 */
7570uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
7571                                 uint32_t cur_el, bool secure)
7572{
7573    CPUARMState *env = cs->env_ptr;
7574    bool rw;
7575    bool scr;
7576    bool hcr;
7577    int target_el;
7578    /* Is the highest EL AArch64? */
7579    bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
7580    uint64_t hcr_el2;
7581
7582    if (arm_feature(env, ARM_FEATURE_EL3)) {
7583        rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
7584    } else {
7585        /* Either EL2 is the highest EL (and so the EL2 register width
7586         * is given by is64); or there is no EL2 or EL3, in which case
7587         * the value of 'rw' does not affect the table lookup anyway.
7588         */
7589        rw = is64;
7590    }
7591
7592    hcr_el2 = arm_hcr_el2_eff(env);
7593    switch (excp_idx) {
7594    case EXCP_IRQ:
7595        scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
7596        hcr = hcr_el2 & HCR_IMO;
7597        break;
7598    case EXCP_FIQ:
7599        scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
7600        hcr = hcr_el2 & HCR_FMO;
7601        break;
7602    default:
7603        scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
7604        hcr = hcr_el2 & HCR_AMO;
7605        break;
7606    }
7607
7608    /* Perform a table-lookup for the target EL given the current state */
7609    target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
7610
7611    assert(target_el > 0);
7612
7613    return target_el;
7614}
7615
7616void arm_log_exception(int idx)
7617{
7618    if (qemu_loglevel_mask(CPU_LOG_INT)) {
7619        const char *exc = NULL;
7620        static const char * const excnames[] = {
7621            [EXCP_UDEF] = "Undefined Instruction",
7622            [EXCP_SWI] = "SVC",
7623            [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
7624            [EXCP_DATA_ABORT] = "Data Abort",
7625            [EXCP_IRQ] = "IRQ",
7626            [EXCP_FIQ] = "FIQ",
7627            [EXCP_BKPT] = "Breakpoint",
7628            [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
7629            [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
7630            [EXCP_HVC] = "Hypervisor Call",
7631            [EXCP_HYP_TRAP] = "Hypervisor Trap",
7632            [EXCP_SMC] = "Secure Monitor Call",
7633            [EXCP_VIRQ] = "Virtual IRQ",
7634            [EXCP_VFIQ] = "Virtual FIQ",
7635            [EXCP_SEMIHOST] = "Semihosting call",
7636            [EXCP_NOCP] = "v7M NOCP UsageFault",
7637            [EXCP_INVSTATE] = "v7M INVSTATE UsageFault",
7638            [EXCP_STKOF] = "v8M STKOF UsageFault",
7639            [EXCP_LAZYFP] = "v7M exception during lazy FP stacking",
7640            [EXCP_LSERR] = "v8M LSERR UsageFault",
7641            [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
7642        };
7643
7644        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
7645            exc = excnames[idx];
7646        }
7647        if (!exc) {
7648            exc = "unknown";
7649        }
7650        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
7651    }
7652}
7653
7654/*
7655 * Function used to synchronize QEMU's AArch64 register set with AArch32
7656 * register set.  This is necessary when switching between AArch32 and AArch64
7657 * execution state.
7658 */
7659void aarch64_sync_32_to_64(CPUARMState *env)
7660{
7661    int i;
7662    uint32_t mode = env->uncached_cpsr & CPSR_M;
7663
7664    /* We can blanket copy R[0:7] to X[0:7] */
7665    for (i = 0; i < 8; i++) {
7666        env->xregs[i] = env->regs[i];
7667    }
7668
7669    /*
7670     * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
7671     * Otherwise, they come from the banked user regs.
7672     */
7673    if (mode == ARM_CPU_MODE_FIQ) {
7674        for (i = 8; i < 13; i++) {
7675            env->xregs[i] = env->usr_regs[i - 8];
7676        }
7677    } else {
7678        for (i = 8; i < 13; i++) {
7679            env->xregs[i] = env->regs[i];
7680        }
7681    }
7682
7683    /*
7684     * Registers x13-x23 are the various mode SP and LR registers. Registers
7685     * r13 and r14 are only copied if we are in that mode, otherwise we copy
7686     * from the mode banked register.
7687     */
7688    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
7689        env->xregs[13] = env->regs[13];
7690        env->xregs[14] = env->regs[14];
7691    } else {
7692        env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
7693        /* HYP is an exception in that it is copied from r14 */
7694        if (mode == ARM_CPU_MODE_HYP) {
7695            env->xregs[14] = env->regs[14];
7696        } else {
7697            env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)];
7698        }
7699    }
7700
7701    if (mode == ARM_CPU_MODE_HYP) {
7702        env->xregs[15] = env->regs[13];
7703    } else {
7704        env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
7705    }
7706
7707    if (mode == ARM_CPU_MODE_IRQ) {
7708        env->xregs[16] = env->regs[14];
7709        env->xregs[17] = env->regs[13];
7710    } else {
7711        env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)];
7712        env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
7713    }
7714
7715    if (mode == ARM_CPU_MODE_SVC) {
7716        env->xregs[18] = env->regs[14];
7717        env->xregs[19] = env->regs[13];
7718    } else {
7719        env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)];
7720        env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
7721    }
7722
7723    if (mode == ARM_CPU_MODE_ABT) {
7724        env->xregs[20] = env->regs[14];
7725        env->xregs[21] = env->regs[13];
7726    } else {
7727        env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)];
7728        env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
7729    }
7730
7731    if (mode == ARM_CPU_MODE_UND) {
7732        env->xregs[22] = env->regs[14];
7733        env->xregs[23] = env->regs[13];
7734    } else {
7735        env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)];
7736        env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
7737    }
7738
7739    /*
7740     * Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
7741     * mode, then we can copy from r8-r14.  Otherwise, we copy from the
7742     * FIQ bank for r8-r14.
7743     */
7744    if (mode == ARM_CPU_MODE_FIQ) {
7745        for (i = 24; i < 31; i++) {
7746            env->xregs[i] = env->regs[i - 16];   /* X[24:30] <- R[8:14] */
7747        }
7748    } else {
7749        for (i = 24; i < 29; i++) {
7750            env->xregs[i] = env->fiq_regs[i - 24];
7751        }
7752        env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
7753        env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)];
7754    }
7755
7756    env->pc = env->regs[15];
7757}
7758
7759/*
7760 * Function used to synchronize QEMU's AArch32 register set with AArch64
7761 * register set.  This is necessary when switching between AArch32 and AArch64
7762 * execution state.
7763 */
7764void aarch64_sync_64_to_32(CPUARMState *env)
7765{
7766    int i;
7767    uint32_t mode = env->uncached_cpsr & CPSR_M;
7768
7769    /* We can blanket copy X[0:7] to R[0:7] */
7770    for (i = 0; i < 8; i++) {
7771        env->regs[i] = env->xregs[i];
7772    }
7773
7774    /*
7775     * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
7776     * Otherwise, we copy x8-x12 into the banked user regs.
7777     */
7778    if (mode == ARM_CPU_MODE_FIQ) {
7779        for (i = 8; i < 13; i++) {
7780            env->usr_regs[i - 8] = env->xregs[i];
7781        }
7782    } else {
7783        for (i = 8; i < 13; i++) {
7784            env->regs[i] = env->xregs[i];
7785        }
7786    }
7787
7788    /*
7789     * Registers r13 & r14 depend on the current mode.
7790     * If we are in a given mode, we copy the corresponding x registers to r13
7791     * and r14.  Otherwise, we copy the x register to the banked r13 and r14
7792     * for the mode.
7793     */
7794    if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
7795        env->regs[13] = env->xregs[13];
7796        env->regs[14] = env->xregs[14];
7797    } else {
7798        env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
7799
7800        /*
7801         * HYP is an exception in that it does not have its own banked r14 but
7802         * shares the USR r14
7803         */
7804        if (mode == ARM_CPU_MODE_HYP) {
7805            env->regs[14] = env->xregs[14];
7806        } else {
7807            env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
7808        }
7809    }
7810
7811    if (mode == ARM_CPU_MODE_HYP) {
7812        env->regs[13] = env->xregs[15];
7813    } else {
7814        env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
7815    }
7816
7817    if (mode == ARM_CPU_MODE_IRQ) {
7818        env->regs[14] = env->xregs[16];
7819        env->regs[13] = env->xregs[17];
7820    } else {
7821        env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
7822        env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
7823    }
7824
7825    if (mode == ARM_CPU_MODE_SVC) {
7826        env->regs[14] = env->xregs[18];
7827        env->regs[13] = env->xregs[19];
7828    } else {
7829        env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
7830        env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
7831    }
7832
7833    if (mode == ARM_CPU_MODE_ABT) {
7834        env->regs[14] = env->xregs[20];
7835        env->regs[13] = env->xregs[21];
7836    } else {
7837        env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
7838        env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
7839    }
7840
7841    if (mode == ARM_CPU_MODE_UND) {
7842        env->regs[14] = env->xregs[22];
7843        env->regs[13] = env->xregs[23];
7844    } else {
7845        env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
7846        env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
7847    }
7848
7849    /* Registers x24-x30 are mapped to r8-r14 in FIQ mode.  If we are in FIQ
7850     * mode, then we can copy to r8-r14.  Otherwise, we copy to the
7851     * FIQ bank for r8-r14.
7852     */
7853    if (mode == ARM_CPU_MODE_FIQ) {
7854        for (i = 24; i < 31; i++) {
7855            env->regs[i - 16] = env->xregs[i];   /* X[24:30] -> R[8:14] */
7856        }
7857    } else {
7858        for (i = 24; i < 29; i++) {
7859            env->fiq_regs[i - 24] = env->xregs[i];
7860        }
7861        env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
7862        env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
7863    }
7864
7865    env->regs[15] = env->pc;
7866}
7867
7868static void take_aarch32_exception(CPUARMState *env, int new_mode,
7869                                   uint32_t mask, uint32_t offset,
7870                                   uint32_t newpc)
7871{
7872    /* Change the CPU state so as to actually take the exception. */
7873    switch_mode(env, new_mode);
7874    /*
7875     * For exceptions taken to AArch32 we must clear the SS bit in both
7876     * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
7877     */
7878    env->uncached_cpsr &= ~PSTATE_SS;
7879    env->spsr = cpsr_read(env);
7880    /* Clear IT bits.  */
7881    env->condexec_bits = 0;
7882    /* Switch to the new mode, and to the correct instruction set.  */
7883    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
7884    /* Set new mode endianness */
7885    env->uncached_cpsr &= ~CPSR_E;
7886    if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
7887        env->uncached_cpsr |= CPSR_E;
7888    }
7889    /* J and IL must always be cleared for exception entry */
7890    env->uncached_cpsr &= ~(CPSR_IL | CPSR_J);
7891    env->daif |= mask;
7892
7893    if (new_mode == ARM_CPU_MODE_HYP) {
7894        env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0;
7895        env->elr_el[2] = env->regs[15];
7896    } else {
7897        /*
7898         * This is a lie, as there was no c1_sys on V4T/V5, but who cares
7899         * and we should just guard the Thumb mode check on V4.
7900         */
7901        if (arm_feature(env, ARM_FEATURE_V4T)) {
7902            env->thumb =
7903                (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
7904        }
7905        env->regs[14] = env->regs[15] + offset;
7906    }
7907    env->regs[15] = newpc;
7908}
7909
7910static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs)
7911{
7912    /*
7913     * Handle exception entry to Hyp mode; this is sufficiently
7914     * different to entry to other AArch32 modes that we handle it
7915     * separately here.
7916     *
7917     * The vector table entry used is always the 0x14 Hyp mode entry point,
7918     * unless this is an UNDEF/HVC/abort taken from Hyp to Hyp.
7919     * The offset applied to the preferred return address is always zero
7920     * (see DDI0487C.a section G1.12.3).
7921     * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values.
7922     */
7923    uint32_t addr, mask;
7924    ARMCPU *cpu = ARM_CPU(cs);
7925    CPUARMState *env = &cpu->env;
7926
7927    switch (cs->exception_index) {
7928    case EXCP_UDEF:
7929        addr = 0x04;
7930        break;
7931    case EXCP_SWI:
7932        addr = 0x14;
7933        break;
7934    case EXCP_BKPT:
7935        /* Fall through to prefetch abort.  */
7936    case EXCP_PREFETCH_ABORT:
7937        env->cp15.ifar_s = env->exception.vaddress;
7938        qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n",
7939                      (uint32_t)env->exception.vaddress);
7940        addr = 0x0c;
7941        break;
7942    case EXCP_DATA_ABORT:
7943        env->cp15.dfar_s = env->exception.vaddress;
7944        qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n",
7945                      (uint32_t)env->exception.vaddress);
7946        addr = 0x10;
7947        break;
7948    case EXCP_IRQ:
7949        addr = 0x18;
7950        break;
7951    case EXCP_FIQ:
7952        addr = 0x1c;
7953        break;
7954    case EXCP_HVC:
7955        addr = 0x08;
7956        break;
7957    case EXCP_HYP_TRAP:
7958        addr = 0x14;
7959        break;
7960    default:
7961        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
7962    }
7963
7964    if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) {
7965        if (!arm_feature(env, ARM_FEATURE_V8)) {
7966            /*
7967             * QEMU syndrome values are v8-style. v7 has the IL bit
7968             * UNK/SBZP for "field not valid" cases, where v8 uses RES1.
7969             * If this is a v7 CPU, squash the IL bit in those cases.
7970             */
7971            if (cs->exception_index == EXCP_PREFETCH_ABORT ||
7972                (cs->exception_index == EXCP_DATA_ABORT &&
7973                 !(env->exception.syndrome & ARM_EL_ISV)) ||
7974                syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) {
7975                env->exception.syndrome &= ~ARM_EL_IL;
7976            }
7977        }
7978        env->cp15.esr_el[2] = env->exception.syndrome;
7979    }
7980
7981    if (arm_current_el(env) != 2 && addr < 0x14) {
7982        addr = 0x14;
7983    }
7984
7985    mask = 0;
7986    if (!(env->cp15.scr_el3 & SCR_EA)) {
7987        mask |= CPSR_A;
7988    }
7989    if (!(env->cp15.scr_el3 & SCR_IRQ)) {
7990        mask |= CPSR_I;
7991    }
7992    if (!(env->cp15.scr_el3 & SCR_FIQ)) {
7993        mask |= CPSR_F;
7994    }
7995
7996    addr += env->cp15.hvbar;
7997
7998    take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr);
7999}
8000
8001static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
8002{
8003    ARMCPU *cpu = ARM_CPU(cs);
8004    CPUARMState *env = &cpu->env;
8005    uint32_t addr;
8006    uint32_t mask;
8007    int new_mode;
8008    uint32_t offset;
8009    uint32_t moe;
8010
8011    /* If this is a debug exception we must update the DBGDSCR.MOE bits */
8012    switch (syn_get_ec(env->exception.syndrome)) {
8013    case EC_BREAKPOINT:
8014    case EC_BREAKPOINT_SAME_EL:
8015        moe = 1;
8016        break;
8017    case EC_WATCHPOINT:
8018    case EC_WATCHPOINT_SAME_EL:
8019        moe = 10;
8020        break;
8021    case EC_AA32_BKPT:
8022        moe = 3;
8023        break;
8024    case EC_VECTORCATCH:
8025        moe = 5;
8026        break;
8027    default:
8028        moe = 0;
8029        break;
8030    }
8031
8032    if (moe) {
8033        env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
8034    }
8035
8036    if (env->exception.target_el == 2) {
8037        arm_cpu_do_interrupt_aarch32_hyp(cs);
8038        return;
8039    }
8040
8041    switch (cs->exception_index) {
8042    case EXCP_UDEF:
8043        new_mode = ARM_CPU_MODE_UND;
8044        addr = 0x04;
8045        mask = CPSR_I;
8046        if (env->thumb)
8047            offset = 2;
8048        else
8049            offset = 4;
8050        break;
8051    case EXCP_SWI:
8052        new_mode = ARM_CPU_MODE_SVC;
8053        addr = 0x08;
8054        mask = CPSR_I;
8055        /* The PC already points to the next instruction.  */
8056        offset = 0;
8057        break;
8058    case EXCP_BKPT:
8059        /* Fall through to prefetch abort.  */
8060    case EXCP_PREFETCH_ABORT:
8061        A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
8062        A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
8063        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
8064                      env->exception.fsr, (uint32_t)env->exception.vaddress);
8065        new_mode = ARM_CPU_MODE_ABT;
8066        addr = 0x0c;
8067        mask = CPSR_A | CPSR_I;
8068        offset = 4;
8069        break;
8070    case EXCP_DATA_ABORT:
8071        A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
8072        A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
8073        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
8074                      env->exception.fsr,
8075                      (uint32_t)env->exception.vaddress);
8076        new_mode = ARM_CPU_MODE_ABT;
8077        addr = 0x10;
8078        mask = CPSR_A | CPSR_I;
8079        offset = 8;
8080        break;
8081    case EXCP_IRQ:
8082        new_mode = ARM_CPU_MODE_IRQ;
8083        addr = 0x18;
8084        /* Disable IRQ and imprecise data aborts.  */
8085        mask = CPSR_A | CPSR_I;
8086        offset = 4;
8087        if (env->cp15.scr_el3 & SCR_IRQ) {
8088            /* IRQ routed to monitor mode */
8089            new_mode = ARM_CPU_MODE_MON;
8090            mask |= CPSR_F;
8091        }
8092        break;
8093    case EXCP_FIQ:
8094        new_mode = ARM_CPU_MODE_FIQ;
8095        addr = 0x1c;
8096        /* Disable FIQ, IRQ and imprecise data aborts.  */
8097        mask = CPSR_A | CPSR_I | CPSR_F;
8098        if (env->cp15.scr_el3 & SCR_FIQ) {
8099            /* FIQ routed to monitor mode */
8100            new_mode = ARM_CPU_MODE_MON;
8101        }
8102        offset = 4;
8103        break;
8104    case EXCP_VIRQ:
8105        new_mode = ARM_CPU_MODE_IRQ;
8106        addr = 0x18;
8107        /* Disable IRQ and imprecise data aborts.  */
8108        mask = CPSR_A | CPSR_I;
8109        offset = 4;
8110        break;
8111    case EXCP_VFIQ:
8112        new_mode = ARM_CPU_MODE_FIQ;
8113        addr = 0x1c;
8114        /* Disable FIQ, IRQ and imprecise data aborts.  */
8115        mask = CPSR_A | CPSR_I | CPSR_F;
8116        offset = 4;
8117        break;
8118    case EXCP_SMC:
8119        new_mode = ARM_CPU_MODE_MON;
8120        addr = 0x08;
8121        mask = CPSR_A | CPSR_I | CPSR_F;
8122        offset = 0;
8123        break;
8124    default:
8125        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8126        return; /* Never happens.  Keep compiler happy.  */
8127    }
8128
8129    if (new_mode == ARM_CPU_MODE_MON) {
8130        addr += env->cp15.mvbar;
8131    } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
8132        /* High vectors. When enabled, base address cannot be remapped. */
8133        addr += 0xffff0000;
8134    } else {
8135        /* ARM v7 architectures provide a vector base address register to remap
8136         * the interrupt vector table.
8137         * This register is only used in non-monitor mode, and is banked.
8138         * Note: only bits 31:5 are valid.
8139         */
8140        addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
8141    }
8142
8143    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
8144        env->cp15.scr_el3 &= ~SCR_NS;
8145    }
8146
8147    take_aarch32_exception(env, new_mode, mask, offset, addr);
8148}
8149
8150/* Handle exception entry to a target EL which is using AArch64 */
8151static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
8152{
8153    ARMCPU *cpu = ARM_CPU(cs);
8154    CPUARMState *env = &cpu->env;
8155    unsigned int new_el = env->exception.target_el;
8156    target_ulong addr = env->cp15.vbar_el[new_el];
8157    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
8158    unsigned int cur_el = arm_current_el(env);
8159
8160    /*
8161     * Note that new_el can never be 0.  If cur_el is 0, then
8162     * el0_a64 is is_a64(), else el0_a64 is ignored.
8163     */
8164    aarch64_sve_change_el(env, cur_el, new_el, is_a64(env));
8165
8166    if (cur_el < new_el) {
8167        /* Entry vector offset depends on whether the implemented EL
8168         * immediately lower than the target level is using AArch32 or AArch64
8169         */
8170        bool is_aa64;
8171
8172        switch (new_el) {
8173        case 3:
8174            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
8175            break;
8176        case 2:
8177            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
8178            break;
8179        case 1:
8180            is_aa64 = is_a64(env);
8181            break;
8182        default:
8183            g_assert_not_reached();
8184        }
8185
8186        if (is_aa64) {
8187            addr += 0x400;
8188        } else {
8189            addr += 0x600;
8190        }
8191    } else if (pstate_read(env) & PSTATE_SP) {
8192        addr += 0x200;
8193    }
8194
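        /*
         * At this point addr is the base of the relevant 0x200-byte block
         * of the AArch64 vector table: +0x000 current EL with SP_EL0,
         * +0x200 current EL with SP_ELx, +0x400 lower EL using AArch64,
         * +0x600 lower EL using AArch32.  Within a block, offset 0 is the
         * synchronous vector; the IRQ (+0x80) and FIQ (+0x100) offsets are
         * added in the switch below.
         */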
8195    switch (cs->exception_index) {
8196    case EXCP_PREFETCH_ABORT:
8197    case EXCP_DATA_ABORT:
8198        env->cp15.far_el[new_el] = env->exception.vaddress;
8199        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
8200                      env->cp15.far_el[new_el]);
8201        /* fall through */
8202    case EXCP_BKPT:
8203    case EXCP_UDEF:
8204    case EXCP_SWI:
8205    case EXCP_HVC:
8206    case EXCP_HYP_TRAP:
8207    case EXCP_SMC:
8208        if (syn_get_ec(env->exception.syndrome) == EC_ADVSIMDFPACCESSTRAP) {
8209            /*
8210             * QEMU internal FP/SIMD syndromes from AArch32 include the
8211             * TA and coproc fields which are only exposed if the exception
8212             * is taken to AArch32 Hyp mode. Mask them out to get a valid
8213             * AArch64 format syndrome.
8214             */
8215            env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20);
8216        }
8217        env->cp15.esr_el[new_el] = env->exception.syndrome;
8218        break;
8219    case EXCP_IRQ:
8220    case EXCP_VIRQ:
8221        addr += 0x80;
8222        break;
8223    case EXCP_FIQ:
8224    case EXCP_VFIQ:
8225        addr += 0x100;
8226        break;
8227    case EXCP_SEMIHOST:
8228        qemu_log_mask(CPU_LOG_INT,
8229                      "...handling as semihosting call 0x%" PRIx64 "\n",
8230                      env->xregs[0]);
8231        env->xregs[0] = do_arm_semihosting(env);
8232        return;
8233    default:
8234        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
8235    }
8236
8237    if (is_a64(env)) {
8238        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
8239        aarch64_save_sp(env, arm_current_el(env));
8240        env->elr_el[new_el] = env->pc;
8241    } else {
8242        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
8243        env->elr_el[new_el] = env->regs[15];
8244
8245        aarch64_sync_32_to_64(env);
8246
8247        env->condexec_bits = 0;
8248    }
8249    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
8250                  env->elr_el[new_el]);
8251
8252    pstate_write(env, PSTATE_DAIF | new_mode);
8253    env->aarch64 = 1;
8254    aarch64_restore_sp(env, new_el);
8255
8256    env->pc = addr;
8257
8258    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
8259                  new_el, env->pc, pstate_read(env));
8260}
8261
8262static inline bool check_for_semihosting(CPUState *cs)
8263{
8264#ifdef CONFIG_TCG
8265    /* Check whether this exception is a semihosting call; if so
8266     * then handle it and return true; otherwise return false.
8267     */
8268    ARMCPU *cpu = ARM_CPU(cs);
8269    CPUARMState *env = &cpu->env;
8270
8271    if (is_a64(env)) {
8272        if (cs->exception_index == EXCP_SEMIHOST) {
8273            /* This is always the 64-bit semihosting exception.
8274             * The "is this usermode" and "is semihosting enabled"
8275             * checks have been done at translate time.
8276             */
8277            qemu_log_mask(CPU_LOG_INT,
8278                          "...handling as semihosting call 0x%" PRIx64 "\n",
8279                          env->xregs[0]);
8280            env->xregs[0] = do_arm_semihosting(env);
8281            return true;
8282        }
8283        return false;
8284    } else {
8285        uint32_t imm;
8286
8287        /* Only intercept calls from privileged modes, to provide some
8288         * semblance of security.
8289         */
8290        if (cs->exception_index != EXCP_SEMIHOST &&
8291            (!semihosting_enabled() ||
8292             ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
8293            return false;
8294        }
8295
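            /*
             * For reference, the immediates checked below are the
             * architected AArch32 semihosting call numbers: SVC 0xab in
             * Thumb state, SVC 0x123456 in Arm state, and BKPT 0xab for
             * the Thumb BKPT-based variant.
             */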
8296        switch (cs->exception_index) {
8297        case EXCP_SEMIHOST:
8298            /* This is always a semihosting call; the "is this usermode"
8299             * and "is semihosting enabled" checks have been done at
8300             * translate time.
8301             */
8302            break;
8303        case EXCP_SWI:
8304            /* Check for semihosting interrupt.  */
8305            if (env->thumb) {
8306                imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
8307                    & 0xff;
8308                if (imm == 0xab) {
8309                    break;
8310                }
8311            } else {
8312                imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
8313                    & 0xffffff;
8314                if (imm == 0x123456) {
8315                    break;
8316                }
8317            }
8318            return false;
8319        case EXCP_BKPT:
8320            /* See if this is a semihosting syscall.  */
8321            if (env->thumb) {
8322                imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
8323                    & 0xff;
8324                if (imm == 0xab) {
8325                    env->regs[15] += 2;
8326                    break;
8327                }
8328            }
8329            return false;
8330        default:
8331            return false;
8332        }
8333
8334        qemu_log_mask(CPU_LOG_INT,
8335                      "...handling as semihosting call 0x%x\n",
8336                      env->regs[0]);
8337        env->regs[0] = do_arm_semihosting(env);
8338        return true;
8339    }
8340#else
8341    return false;
8342#endif
8343}
8344
8345/* Handle a CPU exception for A and R profile CPUs.
8346 * Do any appropriate logging, handle PSCI calls, and then hand off
8347 * to the AArch64-entry or AArch32-entry function depending on the
8348 * target exception level's register width.
8349 */
8350void arm_cpu_do_interrupt(CPUState *cs)
8351{
8352    ARMCPU *cpu = ARM_CPU(cs);
8353    CPUARMState *env = &cpu->env;
8354    unsigned int new_el = env->exception.target_el;
8355
8356    assert(!arm_feature(env, ARM_FEATURE_M));
8357
8358    arm_log_exception(cs->exception_index);
8359    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
8360                  new_el);
8361    if (qemu_loglevel_mask(CPU_LOG_INT)
8362        && !excp_is_internal(cs->exception_index)) {
8363        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
8364                      syn_get_ec(env->exception.syndrome),
8365                      env->exception.syndrome);
8366    }
8367
8368    if (arm_is_psci_call(cpu, cs->exception_index)) {
8369        arm_handle_psci_call(cpu);
8370        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
8371        return;
8372    }
8373
8374    /* Semihosting semantics depend on the register width of the
8375     * code that caused the exception, not the target exception level,
8376     * so must be handled here.
8377     */
8378    if (check_for_semihosting(cs)) {
8379        return;
8380    }
8381
8382    /* Hooks may change global state, so the BQL must be held; it is
8383     * also needed for any modification of
8384     * cs->interrupt_request.
8385     */
8386    g_assert(qemu_mutex_iothread_locked());
8387
8388    arm_call_pre_el_change_hook(cpu);
8389
8390    assert(!excp_is_internal(cs->exception_index));
8391    if (arm_el_is_aa64(env, new_el)) {
8392        arm_cpu_do_interrupt_aarch64(cs);
8393    } else {
8394        arm_cpu_do_interrupt_aarch32(cs);
8395    }
8396
8397    arm_call_el_change_hook(cpu);
8398
8399    if (!kvm_enabled()) {
8400        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
8401    }
8402}
8403#endif /* !CONFIG_USER_ONLY */
8404
8405/* Return the exception level which controls this address translation regime */
8406static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
8407{
8408    switch (mmu_idx) {
8409    case ARMMMUIdx_S2NS:
8410    case ARMMMUIdx_S1E2:
8411        return 2;
8412    case ARMMMUIdx_S1E3:
8413        return 3;
8414    case ARMMMUIdx_S1SE0:
8415        return arm_el_is_aa64(env, 3) ? 1 : 3;
8416    case ARMMMUIdx_S1SE1:
8417    case ARMMMUIdx_S1NSE0:
8418    case ARMMMUIdx_S1NSE1:
8419    case ARMMMUIdx_MPrivNegPri:
8420    case ARMMMUIdx_MUserNegPri:
8421    case ARMMMUIdx_MPriv:
8422    case ARMMMUIdx_MUser:
8423    case ARMMMUIdx_MSPrivNegPri:
8424    case ARMMMUIdx_MSUserNegPri:
8425    case ARMMMUIdx_MSPriv:
8426    case ARMMMUIdx_MSUser:
8427        return 1;
8428    default:
8429        g_assert_not_reached();
8430    }
8431}
8432
8433#ifndef CONFIG_USER_ONLY
8434
8435/* Return the SCTLR value which controls this address translation regime */
8436static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
8437{
8438    return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
8439}
8440
8441/* Return true if the specified stage of address translation is disabled */
8442static inline bool regime_translation_disabled(CPUARMState *env,
8443                                               ARMMMUIdx mmu_idx)
8444{
8445    if (arm_feature(env, ARM_FEATURE_M)) {
8446        switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
8447                (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
8448        case R_V7M_MPU_CTRL_ENABLE_MASK:
8449            /* Enabled, but not for HardFault and NMI */
8450            return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
8451        case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
8452            /* Enabled for all cases */
8453            return false;
8454        case 0:
8455        default:
8456            /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
8457             * we warned about that in armv7m_nvic.c when the guest set it.
8458             */
8459            return true;
8460        }
8461    }
8462
8463    if (mmu_idx == ARMMMUIdx_S2NS) {
8464        /* HCR.DC means HCR.VM behaves as 1 */
8465        return (env->cp15.hcr_el2 & (HCR_DC | HCR_VM)) == 0;
8466    }
8467
8468    if (env->cp15.hcr_el2 & HCR_TGE) {
8469        /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
8470        if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
8471            return true;
8472        }
8473    }
8474
8475    if ((env->cp15.hcr_el2 & HCR_DC) &&
8476        (mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1)) {
8477        /* HCR.DC means SCTLR_EL1.M behaves as 0 */
8478        return true;
8479    }
8480
8481    return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
8482}
8483
8484static inline bool regime_translation_big_endian(CPUARMState *env,
8485                                                 ARMMMUIdx mmu_idx)
8486{
8487    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
8488}
8489
8490/* Return the TTBR associated with this translation regime */
8491static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
8492                                   int ttbrn)
8493{
8494    if (mmu_idx == ARMMMUIdx_S2NS) {
8495        return env->cp15.vttbr_el2;
8496    }
8497    if (ttbrn == 0) {
8498        return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
8499    } else {
8500        return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
8501    }
8502}
8503
8504#endif /* !CONFIG_USER_ONLY */
8505
8506/* Return the TCR controlling this translation regime */
8507static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
8508{
8509    if (mmu_idx == ARMMMUIdx_S2NS) {
8510        return &env->cp15.vtcr_el2;
8511    }
8512    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
8513}
8514
8515/* Convert a possible stage1+2 MMU index into the appropriate
8516 * stage 1 MMU index
8517 */
8518static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
8519{
8520    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
8521        mmu_idx += (ARMMMUIdx_S1NSE0 - ARMMMUIdx_S12NSE0);
8522    }
8523    return mmu_idx;
8524}
8525
8526/* Return true if the translation regime is using LPAE format page tables */
8527static inline bool regime_using_lpae_format(CPUARMState *env,
8528                                            ARMMMUIdx mmu_idx)
8529{
8530    int el = regime_el(env, mmu_idx);
8531    if (el == 2 || arm_el_is_aa64(env, el)) {
8532        return true;
8533    }
8534    if (arm_feature(env, ARM_FEATURE_LPAE)
8535        && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
8536        return true;
8537    }
8538    return false;
8539}
8540
8541/* Returns true if the stage 1 translation regime is using LPAE format page
8542 * tables. Used when raising alignment exceptions, whose FSR changes depending
8543 * on whether the long or short descriptor format is in use. */
8544bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
8545{
8546    mmu_idx = stage_1_mmu_idx(mmu_idx);
8547
8548    return regime_using_lpae_format(env, mmu_idx);
8549}
8550
8551#ifndef CONFIG_USER_ONLY
8552static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
8553{
8554    switch (mmu_idx) {
8555    case ARMMMUIdx_S1SE0:
8556    case ARMMMUIdx_S1NSE0:
8557    case ARMMMUIdx_MUser:
8558    case ARMMMUIdx_MSUser:
8559    case ARMMMUIdx_MUserNegPri:
8560    case ARMMMUIdx_MSUserNegPri:
8561        return true;
8562    default:
8563        return false;
8564    case ARMMMUIdx_S12NSE0:
8565    case ARMMMUIdx_S12NSE1:
8566        g_assert_not_reached();
8567    }
8568}
8569
8570/* Translate section/page access permissions to page
8571 * R/W protection flags
8572 *
8573 * @env:         CPUARMState
8574 * @mmu_idx:     MMU index indicating required translation regime
8575 * @ap:          The 3-bit access permissions (AP[2:0])
8576 * @domain_prot: The 2-bit domain access permissions
8577 */
8578static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
8579                                int ap, int domain_prot)
8580{
8581    bool is_user = regime_is_user(env, mmu_idx);
8582
8583    if (domain_prot == 3) {
8584        return PAGE_READ | PAGE_WRITE;
8585    }
8586
8587    switch (ap) {
8588    case 0:
8589        if (arm_feature(env, ARM_FEATURE_V7)) {
8590            return 0;
8591        }
8592        switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
8593        case SCTLR_S:
8594            return is_user ? 0 : PAGE_READ;
8595        case SCTLR_R:
8596            return PAGE_READ;
8597        default:
8598            return 0;
8599        }
8600    case 1:
8601        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
8602    case 2:
8603        if (is_user) {
8604            return PAGE_READ;
8605        } else {
8606            return PAGE_READ | PAGE_WRITE;
8607        }
8608    case 3:
8609        return PAGE_READ | PAGE_WRITE;
8610    case 4: /* Reserved.  */
8611        return 0;
8612    case 5:
8613        return is_user ? 0 : PAGE_READ;
8614    case 6:
8615        return PAGE_READ;
8616    case 7:
8617        if (!arm_feature(env, ARM_FEATURE_V6K)) {
8618            return 0;
8619        }
8620        return PAGE_READ;
8621    default:
8622        g_assert_not_reached();
8623    }
8624}
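
    /*
     * Worked example: for a short-descriptor mapping with AP == 2 the
     * function above returns PAGE_READ for an unprivileged (PL0) regime
     * and PAGE_READ | PAGE_WRITE for a privileged one; PAGE_EXEC is added
     * separately by the callers (honouring XN where the format has it).
     */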
8625
8626/* Translate section/page access permissions to page
8627 * R/W protection flags.
8628 *
8629 * @ap:      The 2-bit simple AP (AP[2:1])
8630 * @is_user: TRUE if accessing from PL0
8631 */
8632static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
8633{
8634    switch (ap) {
8635    case 0:
8636        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
8637    case 1:
8638        return PAGE_READ | PAGE_WRITE;
8639    case 2:
8640        return is_user ? 0 : PAGE_READ;
8641    case 3:
8642        return PAGE_READ;
8643    default:
8644        g_assert_not_reached();
8645    }
8646}
8647
8648static inline int
8649simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
8650{
8651    return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
8652}
8653
8654/* Translate S2 section/page access permissions to protection flags
8655 *
8656 * @env:     CPUARMState
8657 * @s2ap:    The 2-bit stage2 access permissions (S2AP)
8658 * @xn:      XN (execute-never) bit
8659 */
8660static int get_S2prot(CPUARMState *env, int s2ap, int xn)
8661{
8662    int prot = 0;
8663
8664    if (s2ap & 1) {
8665        prot |= PAGE_READ;
8666    }
8667    if (s2ap & 2) {
8668        prot |= PAGE_WRITE;
8669    }
8670    if (!xn) {
8671        if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
8672            prot |= PAGE_EXEC;
8673        }
8674    }
8675    return prot;
8676}
8677
8678/* Translate section/page access permissions to protection flags
8679 *
8680 * @env:     CPUARMState
8681 * @mmu_idx: MMU index indicating required translation regime
8682 * @is_aa64: TRUE if AArch64
8683 * @ap:      The 2-bit simple AP (AP[2:1])
8684 * @ns:      NS (non-secure) bit
8685 * @xn:      XN (execute-never) bit
8686 * @pxn:     PXN (privileged execute-never) bit
8687 */
8688static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
8689                      int ap, int ns, int xn, int pxn)
8690{
8691    bool is_user = regime_is_user(env, mmu_idx);
8692    int prot_rw, user_rw;
8693    bool have_wxn;
8694    int wxn = 0;
8695
8696    assert(mmu_idx != ARMMMUIdx_S2NS);
8697
8698    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
8699    if (is_user) {
8700        prot_rw = user_rw;
8701    } else {
8702        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
8703    }
8704
8705    if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
8706        return prot_rw;
8707    }
8708
8709    /* TODO have_wxn should be replaced with
8710     *   ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
8711     * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
8712     * compatible processors have EL2, which is required for [U]WXN.
8713     */
8714    have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
8715
8716    if (have_wxn) {
8717        wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
8718    }
8719
8720    if (is_aa64) {
8721        switch (regime_el(env, mmu_idx)) {
8722        case 1:
8723            if (!is_user) {
8724                xn = pxn || (user_rw & PAGE_WRITE);
8725            }
8726            break;
8727        case 2:
8728        case 3:
8729            break;
8730        }
8731    } else if (arm_feature(env, ARM_FEATURE_V7)) {
8732        switch (regime_el(env, mmu_idx)) {
8733        case 1:
8734        case 3:
8735            if (is_user) {
8736                xn = xn || !(user_rw & PAGE_READ);
8737            } else {
8738                int uwxn = 0;
8739                if (have_wxn) {
8740                    uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
8741                }
8742                xn = xn || !(prot_rw & PAGE_READ) || pxn ||
8743                     (uwxn && (user_rw & PAGE_WRITE));
8744            }
8745            break;
8746        case 2:
8747            break;
8748        }
8749    } else {
8750        xn = wxn = 0;
8751    }
8752
8753    if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
8754        return prot_rw;
8755    }
8756    return prot_rw | PAGE_EXEC;
8757}
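
    /*
     * Example of the AArch64 EL1 case above: a page with AP == 1
     * (read-write at EL0 and EL1) has xn forced on for privileged (EL1)
     * accesses because user_rw includes PAGE_WRITE, implementing the
     * architectural rule that memory writable at EL0 is never
     * privileged-executable.
     */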
8758
8759static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
8760                                     uint32_t *table, uint32_t address)
8761{
8762    /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
8763    TCR *tcr = regime_tcr(env, mmu_idx);
8764
8765    if (address & tcr->mask) {
8766        if (tcr->raw_tcr & TTBCR_PD1) {
8767            /* Translation table walk disabled for TTBR1 */
8768            return false;
8769        }
8770        *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
8771    } else {
8772        if (tcr->raw_tcr & TTBCR_PD0) {
8773            /* Translation table walk disabled for TTBR0 */
8774            return false;
8775        }
8776        *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
8777    }
8778    *table |= (address >> 18) & 0x3ffc;
8779    return true;
8780}
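
    /*
     * For reference: the index added at the end selects the level 1
     * descriptor for the VA, i.e. VA[31:20] * 4 (one 4-byte entry per
     * 1MB of virtual address space).  Which TTBR is used is governed by
     * TTBCR.N via tcr->mask (derived when TTBCR is written, elsewhere in
     * this file): with N == 2, for example, VAs below 0x40000000 are
     * translated via TTBR0 and VAs at or above it via TTBR1.
     */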
8781
8782/* Translate a S1 pagetable walk through S2 if needed.  */
8783static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
8784                               hwaddr addr, MemTxAttrs txattrs,
8785                               ARMMMUFaultInfo *fi)
8786{
8787    if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
8788        !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
8789        target_ulong s2size;
8790        hwaddr s2pa;
8791        int s2prot;
8792        int ret;
8793        ARMCacheAttrs cacheattrs = {};
8794        ARMCacheAttrs *pcacheattrs = NULL;
8795
8796        if (env->cp15.hcr_el2 & HCR_PTW) {
8797            /*
8798             * PTW means we must fault if this S1 walk touches S2 Device
8799             * memory; otherwise we don't care about the attributes and can
8800             * save the S2 translation the effort of computing them.
8801             */
8802            pcacheattrs = &cacheattrs;
8803        }
8804
8805        ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
8806                                 &txattrs, &s2prot, &s2size, fi, pcacheattrs);
8807        if (ret) {
8808            assert(fi->type != ARMFault_None);
8809            fi->s2addr = addr;
8810            fi->stage2 = true;
8811            fi->s1ptw = true;
8812            return ~0;
8813        }
8814        if (pcacheattrs && (pcacheattrs->attrs & 0xf0) == 0) {
8815            /* Access was to Device memory: generate Permission fault */
8816            fi->type = ARMFault_Permission;
8817            fi->s2addr = addr;
8818            fi->stage2 = true;
8819            fi->s1ptw = true;
8820            return ~0;
8821        }
8822        addr = s2pa;
8823    }
8824    return addr;
8825}
8826
8827/* All loads done in the course of a page table walk go through here. */
8828static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
8829                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
8830{
8831    ARMCPU *cpu = ARM_CPU(cs);
8832    CPUARMState *env = &cpu->env;
8833    MemTxAttrs attrs = {};
8834    MemTxResult result = MEMTX_OK;
8835    AddressSpace *as;
8836    uint32_t data;
8837
8838    attrs.secure = is_secure;
8839    as = arm_addressspace(cs, attrs);
8840    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
8841    if (fi->s1ptw) {
8842        return 0;
8843    }
8844    if (regime_translation_big_endian(env, mmu_idx)) {
8845        data = address_space_ldl_be(as, addr, attrs, &result);
8846    } else {
8847        data = address_space_ldl_le(as, addr, attrs, &result);
8848    }
8849    if (result == MEMTX_OK) {
8850        return data;
8851    }
8852    fi->type = ARMFault_SyncExternalOnWalk;
8853    fi->ea = arm_extabort_type(result);
8854    return 0;
8855}
8856
8857static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
8858                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
8859{
8860    ARMCPU *cpu = ARM_CPU(cs);
8861    CPUARMState *env = &cpu->env;
8862    MemTxAttrs attrs = {};
8863    MemTxResult result = MEMTX_OK;
8864    AddressSpace *as;
8865    uint64_t data;
8866
8867    attrs.secure = is_secure;
8868    as = arm_addressspace(cs, attrs);
8869    addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fi);
8870    if (fi->s1ptw) {
8871        return 0;
8872    }
8873    if (regime_translation_big_endian(env, mmu_idx)) {
8874        data = address_space_ldq_be(as, addr, attrs, &result);
8875    } else {
8876        data = address_space_ldq_le(as, addr, attrs, &result);
8877    }
8878    if (result == MEMTX_OK) {
8879        return data;
8880    }
8881    fi->type = ARMFault_SyncExternalOnWalk;
8882    fi->ea = arm_extabort_type(result);
8883    return 0;
8884}
8885
8886static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
8887                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
8888                             hwaddr *phys_ptr, int *prot,
8889                             target_ulong *page_size,
8890                             ARMMMUFaultInfo *fi)
8891{
8892    CPUState *cs = env_cpu(env);
8893    int level = 1;
8894    uint32_t table;
8895    uint32_t desc;
8896    int type;
8897    int ap;
8898    int domain = 0;
8899    int domain_prot;
8900    hwaddr phys_addr;
8901    uint32_t dacr;
8902
8903    /* Pagetable walk.  */
8904    /* Lookup l1 descriptor.  */
8905    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
8906        /* Section translation fault if page walk is disabled by PD0 or PD1 */
8907        fi->type = ARMFault_Translation;
8908        goto do_fault;
8909    }
8910    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
8911                       mmu_idx, fi);
8912    if (fi->type != ARMFault_None) {
8913        goto do_fault;
8914    }
8915    type = (desc & 3);
8916    domain = (desc >> 5) & 0x0f;
8917    if (regime_el(env, mmu_idx) == 1) {
8918        dacr = env->cp15.dacr_ns;
8919    } else {
8920        dacr = env->cp15.dacr_s;
8921    }
8922    domain_prot = (dacr >> (domain * 2)) & 3;
8923    if (type == 0) {
8924        /* Section translation fault.  */
8925        fi->type = ARMFault_Translation;
8926        goto do_fault;
8927    }
8928    if (type != 2) {
8929        level = 2;
8930    }
8931    if (domain_prot == 0 || domain_prot == 2) {
8932        fi->type = ARMFault_Domain;
8933        goto do_fault;
8934    }
8935    if (type == 2) {
8936        /* 1Mb section.  */
8937        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
8938        ap = (desc >> 10) & 3;
8939        *page_size = 1024 * 1024;
8940    } else {
8941        /* Lookup l2 entry.  */
8942        if (type == 1) {
8943            /* Coarse pagetable.  */
8944            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
8945        } else {
8946            /* Fine pagetable.  */
8947            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
8948        }
8949        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
8950                           mmu_idx, fi);
8951        if (fi->type != ARMFault_None) {
8952            goto do_fault;
8953        }
8954        switch (desc & 3) {
8955        case 0: /* Page translation fault.  */
8956            fi->type = ARMFault_Translation;
8957            goto do_fault;
8958        case 1: /* 64k page.  */
8959            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
8960            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
8961            *page_size = 0x10000;
8962            break;
8963        case 2: /* 4k page.  */
8964            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
8965            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
8966            *page_size = 0x1000;
8967            break;
8968        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
8969            if (type == 1) {
8970                /* ARMv6/XScale extended small page format */
8971                if (arm_feature(env, ARM_FEATURE_XSCALE)
8972                    || arm_feature(env, ARM_FEATURE_V6)) {
8973                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
8974                    *page_size = 0x1000;
8975                } else {
8976                    /* UNPREDICTABLE in ARMv5; we choose to take a
8977                     * page translation fault.
8978                     */
8979                    fi->type = ARMFault_Translation;
8980                    goto do_fault;
8981                }
8982            } else {
8983                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
8984                *page_size = 0x400;
8985            }
8986            ap = (desc >> 4) & 3;
8987            break;
8988        default:
8989            /* Never happens, but compiler isn't smart enough to tell.  */
8990            abort();
8991        }
8992    }
8993    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
8994    *prot |= *prot ? PAGE_EXEC : 0;
8995    if (!(*prot & (1 << access_type))) {
8996        /* Access permission fault.  */
8997        fi->type = ARMFault_Permission;
8998        goto do_fault;
8999    }
9000    *phys_ptr = phys_addr;
9001    return false;
9002do_fault:
9003    fi->domain = domain;
9004    fi->level = level;
9005    return true;
9006}
9007
9008static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
9009                             MMUAccessType access_type, ARMMMUIdx mmu_idx,
9010                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
9011                             target_ulong *page_size, ARMMMUFaultInfo *fi)
9012{
9013    CPUState *cs = env_cpu(env);
9014    int level = 1;
9015    uint32_t table;
9016    uint32_t desc;
9017    uint32_t xn;
9018    uint32_t pxn = 0;
9019    int type;
9020    int ap;
9021    int domain = 0;
9022    int domain_prot;
9023    hwaddr phys_addr;
9024    uint32_t dacr;
9025    bool ns;
9026
9027    /* Pagetable walk.  */
9028    /* Lookup l1 descriptor.  */
9029    if (!get_level1_table_address(env, mmu_idx, &table, address)) {
9030        /* Section translation fault if page walk is disabled by PD0 or PD1 */
9031        fi->type = ARMFault_Translation;
9032        goto do_fault;
9033    }
9034    desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
9035                       mmu_idx, fi);
9036    if (fi->type != ARMFault_None) {
9037        goto do_fault;
9038    }
9039    type = (desc & 3);
9040    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
9041        /* Section translation fault, or attempt to use the encoding
9042         * which is Reserved on implementations without PXN.
9043         */
9044        fi->type = ARMFault_Translation;
9045        goto do_fault;
9046    }
9047    if ((type == 1) || !(desc & (1 << 18))) {
9048        /* Page or Section.  */
9049        domain = (desc >> 5) & 0x0f;
9050    }
9051    if (regime_el(env, mmu_idx) == 1) {
9052        dacr = env->cp15.dacr_ns;
9053    } else {
9054        dacr = env->cp15.dacr_s;
9055    }
9056    if (type == 1) {
9057        level = 2;
9058    }
9059    domain_prot = (dacr >> (domain * 2)) & 3;
9060    if (domain_prot == 0 || domain_prot == 2) {
9061        /* Section or Page domain fault */
9062        fi->type = ARMFault_Domain;
9063        goto do_fault;
9064    }
9065    if (type != 1) {
9066        if (desc & (1 << 18)) {
9067            /* Supersection.  */
9068            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
9069            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
9070            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
9071            *page_size = 0x1000000;
9072        } else {
9073            /* Section.  */
9074            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
9075            *page_size = 0x100000;
9076        }
9077        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
9078        xn = desc & (1 << 4);
9079        pxn = desc & 1;
9080        ns = extract32(desc, 19, 1);
9081    } else {
9082        if (arm_feature(env, ARM_FEATURE_PXN)) {
9083            pxn = (desc >> 2) & 1;
9084        }
9085        ns = extract32(desc, 3, 1);
9086        /* Lookup l2 entry.  */
9087        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
9088        desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
9089                           mmu_idx, fi);
9090        if (fi->type != ARMFault_None) {
9091            goto do_fault;
9092        }
9093        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
9094        switch (desc & 3) {
9095        case 0: /* Page translation fault.  */
9096            fi->type = ARMFault_Translation;
9097            goto do_fault;
9098        case 1: /* 64k page.  */
9099            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
9100            xn = desc & (1 << 15);
9101            *page_size = 0x10000;
9102            break;
9103        case 2: case 3: /* 4k page.  */
9104            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
9105            xn = desc & 1;
9106            *page_size = 0x1000;
9107            break;
9108        default:
9109            /* Never happens, but compiler isn't smart enough to tell.  */
9110            abort();
9111        }
9112    }
9113    if (domain_prot == 3) {
9114        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
9115    } else {
9116        if (pxn && !regime_is_user(env, mmu_idx)) {
9117            xn = 1;
9118        }
9119        if (xn && access_type == MMU_INST_FETCH) {
9120            fi->type = ARMFault_Permission;
9121            goto do_fault;
9122        }
9123
9124        if (arm_feature(env, ARM_FEATURE_V6K) &&
9125                (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
9126            /* The simplified model uses AP[0] as an access control bit.  */
9127            if ((ap & 1) == 0) {
9128                /* Access flag fault.  */
9129                fi->type = ARMFault_AccessFlag;
9130                goto do_fault;
9131            }
9132            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
9133        } else {
9134            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
9135        }
9136        if (*prot && !xn) {
9137            *prot |= PAGE_EXEC;
9138        }
9139        if (!(*prot & (1 << access_type))) {
9140            /* Access permission fault.  */
9141            fi->type = ARMFault_Permission;
9142            goto do_fault;
9143        }
9144    }
9145    if (ns) {
9146        /* The NS bit will (as required by the architecture) have no effect if
9147         * the CPU doesn't support TZ or this is a non-secure translation
9148         * regime, because the attribute will already be non-secure.
9149         */
9150        attrs->secure = false;
9151    }
9152    *phys_ptr = phys_addr;
9153    return false;
9154do_fault:
9155    fi->domain = domain;
9156    fi->level = level;
9157    return true;
9158}
9159
9160/*
9161 * check_s2_mmu_setup
9162 * @cpu:        ARMCPU
9163 * @is_aa64:    True if the translation regime is in AArch64 state
9164 * @level:      Suggested starting level
9165 * @inputsize:  Bitsize of IPAs
9166 * @stride:     Page-table stride (See the ARM ARM)
9167 *
9168 * Returns true if the suggested S2 translation parameters are OK and
9169 * false otherwise.
9170 */
9171static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
9172                               int inputsize, int stride)
9173{
9174    const int grainsize = stride + 3;
9175    int startsizecheck;
9176
9177    /* Negative levels are never allowed.  */
9178    if (level < 0) {
9179        return false;
9180    }
9181
9182    startsizecheck = inputsize - ((3 - level) * stride + grainsize);
9183    if (startsizecheck < 1 || startsizecheck > stride + 4) {
9184        return false;
9185    }
9186
9187    if (is_aa64) {
9188        CPUARMState *env = &cpu->env;
9189        unsigned int pamax = arm_pamax(cpu);
9190
9191        switch (stride) {
9192        case 13: /* 64KB Pages.  */
9193            if (level == 0 || (level == 1 && pamax <= 42)) {
9194                return false;
9195            }
9196            break;
9197        case 11: /* 16KB Pages.  */
9198            if (level == 0 || (level == 1 && pamax <= 40)) {
9199                return false;
9200            }
9201            break;
9202        case 9: /* 4KB Pages.  */
9203            if (level == 0 && pamax <= 42) {
9204                return false;
9205            }
9206            break;
9207        default:
9208            g_assert_not_reached();
9209        }
9210
9211        /* Inputsize checks.  */
9212        if (inputsize > pamax &&
9213            (arm_el_is_aa64(env, 1) || inputsize > 40)) {
9214            /* This is CONSTRAINED UNPREDICTABLE and we choose to fault.  */
9215            return false;
9216        }
9217    } else {
9218        /* AArch32 only supports 4KB pages. Assert on that.  */
9219        assert(stride == 9);
9220
9221        if (level == 0) {
9222            return false;
9223        }
9224    }
9225    return true;
9226}
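
    /*
     * Worked example of the startsizecheck above: with 4KB pages
     * (stride == 9, so grainsize == 12), inputsize == 40 and a suggested
     * starting level of 1, startsizecheck = 40 - (2 * 9 + 12) = 10, which
     * lies within [1, stride + 4] and is therefore accepted.
     */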
9227
9228/* Translate from the 4-bit stage 2 representation of
9229 * memory attributes (without cache-allocation hints) to
9230 * the 8-bit representation of the stage 1 MAIR registers
9231 * (which includes allocation hints).
9232 *
9233 * ref: shared/translation/attrs/S2AttrDecode()
9234 *      .../S2ConvertAttrsHints()
9235 */
9236static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
9237{
9238    uint8_t hiattr = extract32(s2attrs, 2, 2);
9239    uint8_t loattr = extract32(s2attrs, 0, 2);
9240    uint8_t hihint = 0, lohint = 0;
9241
9242    if (hiattr != 0) { /* normal memory */
9243        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
9244            hiattr = loattr = 1; /* non-cacheable */
9245        } else {
9246            if (hiattr != 1) { /* Write-through or write-back */
9247                hihint = 3; /* RW allocate */
9248            }
9249            if (loattr != 1) { /* Write-through or write-back */
9250                lohint = 3; /* RW allocate */
9251            }
9252        }
9253    }
9254
9255    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
9256}
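
    /*
     * For example, s2attrs == 0xf (Normal, Outer and Inner Write-Back)
     * with HCR_EL2.CD clear converts to 0xff, i.e. the MAIR encoding for
     * Normal Write-Back memory with read/write-allocate hints in both the
     * outer and inner attributes.
     */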
9257#endif /* !CONFIG_USER_ONLY */
9258
9259ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va,
9260                                        ARMMMUIdx mmu_idx)
9261{
9262    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
9263    uint32_t el = regime_el(env, mmu_idx);
9264    bool tbi, tbid, epd, hpd, using16k, using64k;
9265    int select, tsz;
9266
9267    /*
9268     * Bit 55 is always between the two regions, and is canonical for
9269     * determining if address tagging is enabled.
9270     */
9271    select = extract64(va, 55, 1);
9272
9273    if (el > 1) {
9274        tsz = extract32(tcr, 0, 6);
9275        using64k = extract32(tcr, 14, 1);
9276        using16k = extract32(tcr, 15, 1);
9277        if (mmu_idx == ARMMMUIdx_S2NS) {
9278            /* VTCR_EL2 */
9279            tbi = tbid = hpd = false;
9280        } else {
9281            tbi = extract32(tcr, 20, 1);
9282            hpd = extract32(tcr, 24, 1);
9283            tbid = extract32(tcr, 29, 1);
9284        }
9285        epd = false;
9286    } else if (!select) {
9287        tsz = extract32(tcr, 0, 6);
9288        epd = extract32(tcr, 7, 1);
9289        using64k = extract32(tcr, 14, 1);
9290        using16k = extract32(tcr, 15, 1);
9291        tbi = extract64(tcr, 37, 1);
9292        hpd = extract64(tcr, 41, 1);
9293        tbid = extract64(tcr, 51, 1);
9294    } else {
9295        int tg = extract32(tcr, 30, 2);
9296        using16k = tg == 1;
9297        using64k = tg == 3;
9298        tsz = extract32(tcr, 16, 6);
9299        epd = extract32(tcr, 23, 1);
9300        tbi = extract64(tcr, 38, 1);
9301        hpd = extract64(tcr, 42, 1);
9302        tbid = extract64(tcr, 52, 1);
9303    }
9304    tsz = MIN(tsz, 39);  /* TODO: ARMv8.4-TTST */
9305    tsz = MAX(tsz, 16);  /* TODO: ARMv8.2-LVA  */
9306
9307    return (ARMVAParameters) {
9308        .tsz = tsz,
9309        .select = select,
9310        .tbi = tbi,
9311        .tbid = tbid,
9312        .epd = epd,
9313        .hpd = hpd,
9314        .using16k = using16k,
9315        .using64k = using64k,
9316    };
9317}
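
    /*
     * For example, with TCR_EL1.T0SZ == 16 and bit 55 of the VA clear,
     * tsz == 16 is returned and the caller computes an input address size
     * of 64 - 16 = 48 bits, i.e. the low region covers VAs up to
     * 0x0000ffffffffffff.
     */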
9318
9319ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
9320                                   ARMMMUIdx mmu_idx, bool data)
9321{
9322    ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx);
9323
9324    /* Present TBI as a composite with TBID.  */
9325    ret.tbi &= (data || !ret.tbid);
9326    return ret;
9327}
9328
9329#ifndef CONFIG_USER_ONLY
9330static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
9331                                          ARMMMUIdx mmu_idx)
9332{
9333    uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
9334    uint32_t el = regime_el(env, mmu_idx);
9335    int select, tsz;
9336    bool epd, hpd;
9337
9338    if (mmu_idx == ARMMMUIdx_S2NS) {
9339        /* VTCR */
9340        bool sext = extract32(tcr, 4, 1);
9341        bool sign = extract32(tcr, 3, 1);
9342
9343        /*
9344         * If the sign-extend bit is not the same as t0sz[3], the result
9345         * is unpredictable. Flag this as a guest error.
9346         */
9347        if (sign != sext) {
9348            qemu_log_mask(LOG_GUEST_ERROR,
9349                          "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
9350        }
9351        tsz = sextract32(tcr, 0, 4) + 8;
9352        select = 0;
9353        hpd = false;
9354        epd = false;
9355    } else if (el == 2) {
9356        /* HTCR */
9357        tsz = extract32(tcr, 0, 3);
9358        select = 0;
9359        hpd = extract64(tcr, 24, 1);
9360        epd = false;
9361    } else {
9362        int t0sz = extract32(tcr, 0, 3);
9363        int t1sz = extract32(tcr, 16, 3);
9364
9365        if (t1sz == 0) {
9366            select = va > (0xffffffffu >> t0sz);
9367        } else {
9368            /* Note that we will detect errors later.  */
9369            select = va >= ~(0xffffffffu >> t1sz);
9370        }
9371        if (!select) {
9372            tsz = t0sz;
9373            epd = extract32(tcr, 7, 1);
9374            hpd = extract64(tcr, 41, 1);
9375        } else {
9376            tsz = t1sz;
9377            epd = extract32(tcr, 23, 1);
9378            hpd = extract64(tcr, 42, 1);
9379        }
9380        /* For aarch32, hpd0 is not enabled without TTBCR.T2E as well.  */
9381        hpd &= extract32(tcr, 6, 1);
9382    }
9383
9384    return (ARMVAParameters) {
9385        .tsz = tsz,
9386        .select = select,
9387        .epd = epd,
9388        .hpd = hpd,
9389    };
9390}
9391
9392static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
9393                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
9394                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
9395                               target_ulong *page_size_ptr,
9396                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
9397{
9398    ARMCPU *cpu = env_archcpu(env);
9399    CPUState *cs = CPU(cpu);
9400    /* Read an LPAE long-descriptor translation table. */
9401    ARMFaultType fault_type = ARMFault_Translation;
9402    uint32_t level;
9403    ARMVAParameters param;
9404    uint64_t ttbr;
9405    hwaddr descaddr, indexmask, indexmask_grainsize;
9406    uint32_t tableattrs;
9407    target_ulong page_size;
9408    uint32_t attrs;
9409    int32_t stride;
9410    int addrsize, inputsize;
9411    TCR *tcr = regime_tcr(env, mmu_idx);
9412    int ap, ns, xn, pxn;
9413    uint32_t el = regime_el(env, mmu_idx);
9414    bool ttbr1_valid;
9415    uint64_t descaddrmask;
9416    bool aarch64 = arm_el_is_aa64(env, el);
9417    bool guarded = false;
9418
9419    /* TODO:
9420     * This code does not handle the different format TCR for VTCR_EL2.
9421     * This code also does not support shareability levels.
9422     * Attribute and permission bit handling should also be checked when adding
9423     * support for those page table walks.
9424     */
9425    if (aarch64) {
9426        param = aa64_va_parameters(env, address, mmu_idx,
9427                                   access_type != MMU_INST_FETCH);
9428        level = 0;
9429        /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
9430         * invalid.
9431         */
9432        ttbr1_valid = (el < 2);
9433        addrsize = 64 - 8 * param.tbi;
9434        inputsize = 64 - param.tsz;
9435    } else {
9436        param = aa32_va_parameters(env, address, mmu_idx);
9437        level = 1;
9438        /* There is no TTBR1 for EL2 */
9439        ttbr1_valid = (el != 2);
9440        addrsize = (mmu_idx == ARMMMUIdx_S2NS ? 40 : 32);
9441        inputsize = addrsize - param.tsz;
9442    }
9443
9444    /*
9445     * We determined the region when collecting the parameters, but we
9446     * have not yet validated that the address is valid for the region.
9447     * Extract the top bits and verify that they all match select.
9448     *
9449     * For aa32, if inputsize == addrsize, then we have selected the
9450     * region by exclusion in aa32_va_parameters and there is no more
9451     * validation to do here.
9452     */
9453    if (inputsize < addrsize) {
9454        target_ulong top_bits = sextract64(address, inputsize,
9455                                           addrsize - inputsize);
9456        if (-top_bits != param.select || (param.select && !ttbr1_valid)) {
9457            /* The gap between the two regions is a Translation fault */
9458            fault_type = ARMFault_Translation;
9459            goto do_fault;
9460        }
9461    }
9462
9463    if (param.using64k) {
9464        stride = 13;
9465    } else if (param.using16k) {
9466        stride = 11;
9467    } else {
9468        stride = 9;
9469    }
9470
9471    /* Note that QEMU ignores shareability and cacheability attributes,
9472     * so we don't need to do anything with the SH, ORGN, IRGN fields
9473     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
9474     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
9475     * implement any ASID-like capability so we can ignore it (instead
9476     * we will always flush the TLB any time the ASID is changed).
9477     */
9478    ttbr = regime_ttbr(env, mmu_idx, param.select);
9479
9480    /* Here we should have set up all the parameters for the translation:
9481     * inputsize, ttbr, epd, stride, tbi
9482     */
9483
9484    if (param.epd) {
9485        /* Translation table walk disabled => Translation fault on TLB miss
9486         * Note: This is always 0 on 64-bit EL2 and EL3.
9487         */
9488        goto do_fault;
9489    }
9490
9491    if (mmu_idx != ARMMMUIdx_S2NS) {
9492        /* The starting level depends on the virtual address size (which can
9493         * be up to 48 bits) and the translation granule size. It indicates
9494         * the number of strides (stride bits at a time) needed to
9495         * consume the bits of the input address. In the pseudocode this is:
9496         *  level = 4 - RoundUp((inputsize - grainsize) / stride)
9497         * where their 'inputsize' is our 'inputsize', 'grainsize' is
9498         * our 'stride + 3' and 'stride' is our 'stride'.
9499         * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
9500         * = 4 - (inputsize - stride - 3 + stride - 1) / stride
9501         * = 4 - (inputsize - 4) / stride;
9502         */
9503        level = 4 - (inputsize - 4) / stride;
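            /*
             * For example, with 4KB granules (stride == 9) and a 48-bit
             * input address this gives level = 4 - (48 - 4) / 9 = 0, i.e.
             * the walk starts at level 0 as the architecture requires.
             */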
9504    } else {
9505        /* For stage 2 translations the starting level is specified by the
9506         * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
9507         */
9508        uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
9509        uint32_t startlevel;
9510        bool ok;
9511
9512        if (!aarch64 || stride == 9) {
9513            /* AArch32 or 4KB pages */
9514            startlevel = 2 - sl0;
9515        } else {
9516            /* 16KB or 64KB pages */
9517            startlevel = 3 - sl0;
9518        }
9519
9520        /* Check that the starting level is valid. */
9521        ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
9522                                inputsize, stride);
9523        if (!ok) {
9524            fault_type = ARMFault_Translation;
9525            goto do_fault;
9526        }
9527        level = startlevel;
9528    }
9529
9530    indexmask_grainsize = (1ULL << (stride + 3)) - 1;
9531    indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
9532
9533    /* Now we can extract the actual base address from the TTBR */
9534    descaddr = extract64(ttbr, 0, 48);
9535    descaddr &= ~indexmask;
9536
9537    /* The address field in the descriptor goes up to bit 39 for ARMv7
9538     * and up to bit 47 for ARMv8; we only use descaddrmask up to bit 39
9539     * for AArch32, because the higher bits are not needed in that case to
9540     * construct the next descriptor address (they should all be zeroes anyway).
9541     */
9542    descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
9543                   ~indexmask_grainsize;
9544
9545    /* Secure accesses start with the page table in secure memory and
9546     * can be downgraded to non-secure at any step. Non-secure accesses
9547     * remain non-secure. We implement this by just ORing in the NSTable/NS
9548     * bits at each step.
9549     */
9550    tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
9551    for (;;) {
9552        uint64_t descriptor;
9553        bool nstable;
9554
9555        descaddr |= (address >> (stride * (4 - level))) & indexmask;
9556        descaddr &= ~7ULL;
9557        nstable = extract32(tableattrs, 4, 1);
9558        descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fi);
9559        if (fi->type != ARMFault_None) {
9560            goto do_fault;
9561        }
9562
9563        if (!(descriptor & 1) ||
9564            (!(descriptor & 2) && (level == 3))) {
9565            /* Invalid, or the Reserved level 3 encoding */
9566            goto do_fault;
9567        }
9568        descaddr = descriptor & descaddrmask;
9569
9570        if ((descriptor & 2) && (level < 3)) {
9571            /* Table entry. The top five bits are attributes which may
9572             * propagate down through lower levels of the table (and
9573             * which are all arranged so that 0 means "no effect", so
9574             * we can gather them up by ORing in the bits at each level).
9575             */
9576            tableattrs |= extract64(descriptor, 59, 5);
9577            level++;
9578            indexmask = indexmask_grainsize;
9579            continue;
9580        }
9581        /* Block entry at level 1 or 2, or page entry at level 3.
9582         * These are basically the same thing, although the number
9583         * of bits we pull in from the vaddr varies.
9584         */
9585        page_size = (1ULL << ((stride * (4 - level)) + 3));
9586        descaddr |= (address & (page_size - 1));
9587        /* Extract attributes from the descriptor */
9588        attrs = extract64(descriptor, 2, 10)
9589            | (extract64(descriptor, 52, 12) << 10);
9590
9591        if (mmu_idx == ARMMMUIdx_S2NS) {
9592            /* Stage 2 table descriptors do not include any attribute fields */
9593            break;
9594        }
9595        /* Merge in attributes from table descriptors */
9596        attrs |= nstable << 3; /* NS */
9597        guarded = extract64(descriptor, 50, 1);  /* GP */
9598        if (param.hpd) {
9599            /* HPD disables all the table attributes except NSTable.  */
9600            break;
9601        }
9602        attrs |= extract32(tableattrs, 0, 2) << 11;     /* XN, PXN */
9603        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
9604         * means "force PL1 access only", which means forcing AP[1] to 0.
9605         */
9606        attrs &= ~(extract32(tableattrs, 2, 1) << 4);   /* !APT[0] => AP[1] */
9607        attrs |= extract32(tableattrs, 3, 1) << 5;      /* APT[1] => AP[2] */
9608        break;
9609    }
9610    /* Here descaddr is the final physical address, and attributes
9611     * are all in attrs.
9612     */
9613    fault_type = ARMFault_AccessFlag;
9614    if ((attrs & (1 << 8)) == 0) {
9615        /* Access flag */
9616        goto do_fault;
9617    }
9618
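        /*
         * For a stage 1 descriptor: ap is AP[2:1] (descriptor bits [7:6]),
         * xn is bit 54 (UXN/XN), pxn (extracted below) is bit 53 and ns is
         * bit 5.  For stage 2, the same bits [7:6] hold S2AP instead.
         */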
9619    ap = extract32(attrs, 4, 2);
9620    xn = extract32(attrs, 12, 1);
9621
9622    if (mmu_idx == ARMMMUIdx_S2NS) {
9623        ns = true;
9624        *prot = get_S2prot(env, ap, xn);
9625    } else {
9626        ns = extract32(attrs, 3, 1);
9627        pxn = extract32(attrs, 11, 1);
9628        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
9629    }
9630
9631    fault_type = ARMFault_Permission;
9632    if (!(*prot & (1 << access_type))) {
9633        goto do_fault;
9634    }
9635
9636    if (ns) {
9637        /* The NS bit will (as required by the architecture) have no effect if
9638         * the CPU doesn't support TZ or this is a non-secure translation
9639         * regime, because the attribute will already be non-secure.
9640         */
9641        txattrs->secure = false;
9642    }
9643    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB.  */
9644    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
9645        txattrs->target_tlb_bit0 = true;
9646    }
9647
9648    if (cacheattrs != NULL) {
9649        if (mmu_idx == ARMMMUIdx_S2NS) {
9650            cacheattrs->attrs = convert_stage2_attrs(env,
9651                                                     extract32(attrs, 0, 4));
9652        } else {
9653            /* Index into MAIR registers for cache attributes */
9654            uint8_t attrindx = extract32(attrs, 0, 3);
9655            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
9656            assert(attrindx <= 7);
9657            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
9658        }
9659        cacheattrs->shareability = extract32(attrs, 6, 2);
9660    }
9661
9662    *phys_ptr = descaddr;
9663    *page_size_ptr = page_size;
9664    return false;
9665
9666do_fault:
9667    fi->type = fault_type;
9668    fi->level = level;
9669    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2.  */
9670    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
9671    return true;
9672}
9673
9674static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
9675                                                ARMMMUIdx mmu_idx,
9676                                                int32_t address, int *prot)
9677{
9678    if (!arm_feature(env, ARM_FEATURE_M)) {
9679        *prot = PAGE_READ | PAGE_WRITE;
9680        switch (address) {
9681        case 0xF0000000 ... 0xFFFFFFFF:
9682            if (regime_sctlr(env, mmu_idx) & SCTLR_V) {
9683                /* hivecs (high vectors) enabled: executing here is OK */
9684                *prot |= PAGE_EXEC;
9685            }
9686            break;
9687        case 0x00000000 ... 0x7FFFFFFF:
9688            *prot |= PAGE_EXEC;
9689            break;
9690        }
9691    } else {
9692        /* Default system address map for M profile cores.
9693         * The architecture specifies which regions are execute-never;
9694         * at the MPU level no other checks are defined.
9695         */
9696        switch (address) {
9697        case 0x00000000 ... 0x1fffffff: /* ROM */
9698        case 0x20000000 ... 0x3fffffff: /* SRAM */
9699        case 0x60000000 ... 0x7fffffff: /* RAM */
9700        case 0x80000000 ... 0x9fffffff: /* RAM */
9701            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
9702            break;
9703        case 0x40000000 ... 0x5fffffff: /* Peripheral */
9704        case 0xa0000000 ... 0xbfffffff: /* Device */
9705        case 0xc0000000 ... 0xdfffffff: /* Device */
9706        case 0xe0000000 ... 0xffffffff: /* System */
9707            *prot = PAGE_READ | PAGE_WRITE;
9708            break;
9709        default:
9710            g_assert_not_reached();
9711        }
9712    }
9713}
9714
9715static bool pmsav7_use_background_region(ARMCPU *cpu,
9716                                         ARMMMUIdx mmu_idx, bool is_user)
9717{
9718    /* Return true if we should use the default memory map as a
9719     * "background" region if there are no hits against any MPU regions.
9720     */
9721    CPUARMState *env = &cpu->env;
9722
9723    if (is_user) {
9724        return false;
9725    }
9726
9727    if (arm_feature(env, ARM_FEATURE_M)) {
9728        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
9729            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
9730    } else {
9731        return regime_sctlr(env, mmu_idx) & SCTLR_BR;
9732    }
9733}
9734
9735static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
9736{
9737    /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
9738    return arm_feature(env, ARM_FEATURE_M) &&
9739        extract32(address, 20, 12) == 0xe00;
9740}
9741
9742static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
9743{
9744    /* True if address is in the M profile system region
9745     * 0xe0000000 - 0xffffffff
9746     */
9747    return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
9748}
9749
9750static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
9751                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
9752                                 hwaddr *phys_ptr, int *prot,
9753                                 target_ulong *page_size,
9754                                 ARMMMUFaultInfo *fi)
9755{
9756    ARMCPU *cpu = env_archcpu(env);
9757    int n;
9758    bool is_user = regime_is_user(env, mmu_idx);
9759
9760    *phys_ptr = address;
9761    *page_size = TARGET_PAGE_SIZE;
9762    *prot = 0;
9763
9764    if (regime_translation_disabled(env, mmu_idx) ||
9765        m_is_ppb_region(env, address)) {
9766        /* MPU disabled or M profile PPB access: use default memory map.
9767         * The other case which uses the default memory map in the
9768         * v7M ARM ARM pseudocode is exception vector reads from the vector
9769         * table. In QEMU those accesses are done in arm_v7m_load_vector(),
9770         * which always does a direct read using address_space_ldl(), rather
9771         * than going via this function, so we don't need to check that here.
9772         */
9773        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
9774    } else { /* MPU enabled */
9775        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
9776            /* region search */
9777            uint32_t base = env->pmsav7.drbar[n];
9778            uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
9779            uint32_t rmask;
9780            bool srdis = false;
9781
9782            if (!(env->pmsav7.drsr[n] & 0x1)) {
9783                continue;
9784            }
9785
9786            if (!rsize) {
9787                qemu_log_mask(LOG_GUEST_ERROR,
9788                              "DRSR[%d]: Rsize field cannot be 0\n", n);
9789                continue;
9790            }
9791            rsize++;
9792            rmask = (1ull << rsize) - 1;
9793
9794            if (base & rmask) {
9795                qemu_log_mask(LOG_GUEST_ERROR,
9796                              "DRBAR[%d]: 0x%" PRIx32 " misaligned "
9797                              "to DRSR region size, mask = 0x%" PRIx32 "\n",
9798                              n, base, rmask);
9799                continue;
9800            }
9801
9802            if (address < base || address > base + rmask) {
9803                /*
9804                 * Address not in this region. We must check whether the
9805                 * region covers addresses in the same page as our address.
9806                 * In that case we must not report a size that covers the
9807                 * whole page for a subsequent hit against a different MPU
9808                 * region or the background region, because it would result in
9809                 * incorrect TLB hits for subsequent accesses to addresses that
9810                 * are in this MPU region.
9811                 */
9812                if (ranges_overlap(base, rmask,
9813                                   address & TARGET_PAGE_MASK,
9814                                   TARGET_PAGE_SIZE)) {
9815                    *page_size = 1;
9816                }
9817                continue;
9818            }
9819
9820            /* Region matched */
9821
9822            if (rsize >= 8) { /* no subregions for regions < 256 bytes */
9823                int i, snd;
9824                uint32_t srdis_mask;
9825
9826                rsize -= 3; /* sub region size (power of 2) */
9827                snd = ((address - base) >> rsize) & 0x7;
9828                srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
9829
9830                srdis_mask = srdis ? 0x3 : 0x0;
9831                for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
9832                    /* This checks, in groups of 2, 4 and then 8, whether
9833                     * the subregion disable bits are consistent. rsize is
9834                     * incremented back up to give the region size, treating
9835                     * consistent adjacent subregions as one region. Stop
9836                     * testing once rsize is big enough for an entire QEMU page.
9837                     */
9838                    int snd_rounded = snd & ~(i - 1);
9839                    uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
9840                                                     snd_rounded + 8, i);
9841                    if (srdis_mask ^ srdis_multi) {
9842                        break;
9843                    }
9844                    srdis_mask = (srdis_mask << i) | srdis_mask;
9845                    rsize++;
9846                }
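                /*
                 * Illustrative example: a 4KB region (DRSR.Rsize == 11) has
                 * eight 512-byte subregions, so rsize drops from 12 to 9 and
                 * snd picks the subregion containing the address.  With
                 * SRD == 0b11111100 and the address in subregion 0, the loop
                 * finds subregions {0,1} consistently enabled and bumps rsize
                 * back to 10, so the reported mapping need not be narrower
                 * than the enabled 1KB pair.
                 */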
9847            }
9848            if (srdis) {
9849                continue;
9850            }
9851            if (rsize < TARGET_PAGE_BITS) {
9852                *page_size = 1 << rsize;
9853            }
9854            break;
9855        }
9856
9857        if (n == -1) { /* no hits */
9858            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
9859                /* background fault */
9860                fi->type = ARMFault_Background;
9861                return true;
9862            }
9863            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
9864        } else { /* an MPU hit! */
9865            uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
9866            uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
9867
9868            if (m_is_system_region(env, address)) {
9869                /* System space is always execute never */
9870                xn = 1;
9871            }
9872
9873            if (is_user) { /* User mode AP bit decoding */
9874                switch (ap) {
9875                case 0:
9876                case 1:
9877                case 5:
9878                    break; /* no access */
9879                case 3:
9880                    *prot |= PAGE_WRITE;
9881                    /* fall through */
9882                case 2:
9883                case 6:
9884                    *prot |= PAGE_READ | PAGE_EXEC;
9885                    break;
9886                case 7:
9887                    /* for v7M, same as 6; for R profile a reserved value */
9888                    if (arm_feature(env, ARM_FEATURE_M)) {
9889                        *prot |= PAGE_READ | PAGE_EXEC;
9890                        break;
9891                    }
9892                    /* fall through */
9893                default:
9894                    qemu_log_mask(LOG_GUEST_ERROR,
9895                                  "DRACR[%d]: Bad value for AP bits: 0x%"
9896                                  PRIx32 "\n", n, ap);
9897                }
9898            } else { /* Priv. mode AP bits decoding */
9899                switch (ap) {
9900                case 0:
9901                    break; /* no access */
9902                case 1:
9903                case 2:
9904                case 3:
9905                    *prot |= PAGE_WRITE;
9906                    /* fall through */
9907                case 5:
9908                case 6:
9909                    *prot |= PAGE_READ | PAGE_EXEC;
9910                    break;
9911                case 7:
9912                    /* for v7M, same as 6; for R profile a reserved value */
9913                    if (arm_feature(env, ARM_FEATURE_M)) {
9914                        *prot |= PAGE_READ | PAGE_EXEC;
9915                        break;
9916                    }
9917                    /* fall through */
9918                default:
9919                    qemu_log_mask(LOG_GUEST_ERROR,
9920                                  "DRACR[%d]: Bad value for AP bits: 0x%"
9921                                  PRIx32 "\n", n, ap);
9922                }
9923            }
9924
9925            /* execute never */
9926            if (xn) {
9927                *prot &= ~PAGE_EXEC;
9928            }
9929        }
9930    }
9931
9932    fi->type = ARMFault_Permission;
9933    fi->level = 1;
9934    return !(*prot & (1 << access_type));
9935}
9936
9937static bool v8m_is_sau_exempt(CPUARMState *env,
9938                              uint32_t address, MMUAccessType access_type)
9939{
9940    /* The architecture specifies that certain address ranges are
9941     * exempt from v8M SAU/IDAU checks.
9942     */
9943    return
9944        (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
9945        (address >= 0xe0000000 && address <= 0xe0002fff) ||
9946        (address >= 0xe000e000 && address <= 0xe000efff) ||
9947        (address >= 0xe002e000 && address <= 0xe002efff) ||
9948        (address >= 0xe0040000 && address <= 0xe0041fff) ||
9949        (address >= 0xe00ff000 && address <= 0xe00fffff);
9950}
9951
9952void v8m_security_lookup(CPUARMState *env, uint32_t address,
9953                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
9954                                V8M_SAttributes *sattrs)
9955{
9956    /* Look up the security attributes for this address. Compare the
9957     * pseudocode SecurityCheck() function.
9958     * We assume the caller has zero-initialized *sattrs.
9959     */
9960    ARMCPU *cpu = env_archcpu(env);
9961    int r;
9962    bool idau_exempt = false, idau_ns = true, idau_nsc = true;
9963    int idau_region = IREGION_NOTVALID;
9964    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
9965    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
9966
9967    if (cpu->idau) {
9968        IDAUInterfaceClass *iic = IDAU_INTERFACE_GET_CLASS(cpu->idau);
9969        IDAUInterface *ii = IDAU_INTERFACE(cpu->idau);
9970
9971        iic->check(ii, address, &idau_region, &idau_exempt, &idau_ns,
9972                   &idau_nsc);
9973    }
9974
9975    if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
9976        /* 0xf0000000..0xffffffff is always S for insn fetches */
9977        return;
9978    }
9979
9980    if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
9981        sattrs->ns = !regime_is_secure(env, mmu_idx);
9982        return;
9983    }
9984
9985    if (idau_region != IREGION_NOTVALID) {
9986        sattrs->irvalid = true;
9987        sattrs->iregion = idau_region;
9988    }
9989
9990    switch (env->sau.ctrl & 3) {
9991    case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
9992        break;
9993    case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
9994        sattrs->ns = true;
9995        break;
9996    default: /* SAU.ENABLE == 1 */
9997        for (r = 0; r < cpu->sau_sregion; r++) {
9998            if (env->sau.rlar[r] & 1) {
9999                uint32_t base = env->sau.rbar[r] & ~0x1f;
10000                uint32_t limit = env->sau.rlar[r] | 0x1f;
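                /*
                 * Illustrative example: SAU_RBAR == 0x20000000 with
                 * SAU_RLAR == 0x2000ffe1 describes an enabled region covering
                 * 0x20000000..0x2000ffff; bit 1 of RLAR being clear makes it
                 * Non-secure rather than Secure NSC.  Regions are 32-byte
                 * granular, hence the low five bits forced above.
                 */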
10001
10002                if (base <= address && limit >= address) {
10003                    if (base > addr_page_base || limit < addr_page_limit) {
10004                        sattrs->subpage = true;
10005                    }
10006                    if (sattrs->srvalid) {
10007                        /* If we hit in more than one region then we must report
10008                         * as Secure, not NS-Callable, with no valid region
10009                         * number info.
10010                         */
10011                        sattrs->ns = false;
10012                        sattrs->nsc = false;
10013                        sattrs->sregion = 0;
10014                        sattrs->srvalid = false;
10015                        break;
10016                    } else {
10017                        if (env->sau.rlar[r] & 2) {
10018                            sattrs->nsc = true;
10019                        } else {
10020                            sattrs->ns = true;
10021                        }
10022                        sattrs->srvalid = true;
10023                        sattrs->sregion = r;
10024                    }
10025                } else {
10026                    /*
10027                     * Address not in this region. We must check whether the
10028                     * region covers addresses in the same page as our address.
10029                     * In that case we must not report a size that covers the
10030                     * whole page for a subsequent hit against a different SAU
10031                     * region, because it would result in incorrect TLB hits
10032                     * for subsequent accesses to addresses that are in this
10033                     * SAU region.
10034                     */
10035                    if (limit >= base &&
10036                        ranges_overlap(base, limit - base + 1,
10037                                       addr_page_base,
10038                                       TARGET_PAGE_SIZE)) {
10039                        sattrs->subpage = true;
10040                    }
10041                }
10042            }
10043        }
10044        break;
10045    }
10046
10047    /*
10048     * The IDAU will override the SAU lookup results if it specifies
10049     * higher security than the SAU does.
10050     */
10051    if (!idau_ns) {
10052        if (sattrs->ns || (!idau_nsc && sattrs->nsc)) {
10053            sattrs->ns = false;
10054            sattrs->nsc = idau_nsc;
10055        }
10056    }
10057}
10058
10059bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
10060                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
10061                              hwaddr *phys_ptr, MemTxAttrs *txattrs,
10062                              int *prot, bool *is_subpage,
10063                              ARMMMUFaultInfo *fi, uint32_t *mregion)
10064{
10065    /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
10066     * that a full phys-to-virt translation does).
10067     * mregion is (if not NULL) set to the region number which matched,
10068     * or -1 if no region number is returned (MPU off, address did not
10069     * hit a region, address hit in multiple regions).
10070     * We set is_subpage to true if the region hit doesn't cover the
10071     * entire TARGET_PAGE the address is within.
10072     */
10073    ARMCPU *cpu = env_archcpu(env);
10074    bool is_user = regime_is_user(env, mmu_idx);
10075    uint32_t secure = regime_is_secure(env, mmu_idx);
10076    int n;
10077    int matchregion = -1;
10078    bool hit = false;
10079    uint32_t addr_page_base = address & TARGET_PAGE_MASK;
10080    uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
10081
10082    *is_subpage = false;
10083    *phys_ptr = address;
10084    *prot = 0;
10085    if (mregion) {
10086        *mregion = -1;
10087    }
10088
10089    /* Unlike the ARM ARM pseudocode, we don't need to check whether this
10090     * was an exception vector read from the vector table (which is always
10091     * done using the default system address map), because those accesses
10092     * are done in arm_v7m_load_vector(), which always does a direct
10093     * read using address_space_ldl(), rather than going via this function.
10094     */
10095    if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
10096        hit = true;
10097    } else if (m_is_ppb_region(env, address)) {
10098        hit = true;
10099    } else {
10100        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
10101            hit = true;
10102        }
10103
10104        for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
10105            /* region search */
10106            /* Note that the base address is bits [31:5] from the register
10107             * with bits [4:0] all zeroes, but the limit address is bits
10108             * [31:5] from the register with bits [4:0] all ones.
10109             */
10110            uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
10111            uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
10112
10113            if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
10114                /* Region disabled */
10115                continue;
10116            }
10117
10118            if (address < base || address > limit) {
10119                /*
10120                 * Address not in this region. We must check whether the
10121                 * region covers addresses in the same page as our address.
10122                 * In that case we must not report a size that covers the
10123                 * whole page for a subsequent hit against a different MPU
10124                 * region or the background region, because it would result in
10125                 * incorrect TLB hits for subsequent accesses to addresses that
10126                 * are in this MPU region.
10127                 */
10128                if (limit >= base &&
10129                    ranges_overlap(base, limit - base + 1,
10130                                   addr_page_base,
10131                                   TARGET_PAGE_SIZE)) {
10132                    *is_subpage = true;
10133                }
10134                continue;
10135            }
10136
10137            if (base > addr_page_base || limit < addr_page_limit) {
10138                *is_subpage = true;
10139            }
10140
10141            if (matchregion != -1) {
10142                /* Multiple regions match -- always a failure (unlike
10143                 * PMSAv7 where highest-numbered-region wins)
10144                 */
10145                fi->type = ARMFault_Permission;
10146                fi->level = 1;
10147                return true;
10148            }
10149
10150            matchregion = n;
10151            hit = true;
10152        }
10153    }
10154
10155    if (!hit) {
10156        /* background fault */
10157        fi->type = ARMFault_Background;
10158        return true;
10159    }
10160
10161    if (matchregion == -1) {
10162        /* hit using the background region */
10163        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
10164    } else {
10165        uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
10166        uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
10167
10168        if (m_is_system_region(env, address)) {
10169            /* System space is always execute never */
10170            xn = 1;
10171        }
10172
10173        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
10174        if (*prot && !xn) {
10175            *prot |= PAGE_EXEC;
10176        }
10177        /* We don't need to look the attribute up in the MAIR0/MAIR1
10178         * registers because that only tells us about cacheability.
10179         */
10180        if (mregion) {
10181            *mregion = matchregion;
10182        }
10183    }
10184
10185    fi->type = ARMFault_Permission;
10186    fi->level = 1;
10187    return !(*prot & (1 << access_type));
10188}
10189
10190
10191static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
10192                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10193                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
10194                                 int *prot, target_ulong *page_size,
10195                                 ARMMMUFaultInfo *fi)
10196{
10197    uint32_t secure = regime_is_secure(env, mmu_idx);
10198    V8M_SAttributes sattrs = {};
10199    bool ret;
10200    bool mpu_is_subpage;
10201
10202    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
10203        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
10204        if (access_type == MMU_INST_FETCH) {
10205            /* Instruction fetches always use the MMU bank and the
10206             * transaction attribute determined by the fetch address,
10207             * regardless of CPU state. This is painful for QEMU
10208             * to handle, because it would mean we need to encode
10209             * into the mmu_idx not just the (user, negpri) information
10210             * for the current security state but also that for the
10211             * other security state, which would balloon the number
10212             * of mmu_idx values needed alarmingly.
10213             * Fortunately we can avoid this because it's not actually
10214             * possible to arbitrarily execute code from memory with
10215             * the wrong security attribute: it will always generate
10216             * an exception of some kind or another, apart from the
10217             * special case of an NS CPU executing an SG instruction
10218             * in S&NSC memory. So we always just fail the translation
10219             * here and sort things out in the exception handler
10220             * (including possibly emulating an SG instruction).
10221             */
10222            if (sattrs.ns != !secure) {
10223                if (sattrs.nsc) {
10224                    fi->type = ARMFault_QEMU_NSCExec;
10225                } else {
10226                    fi->type = ARMFault_QEMU_SFault;
10227                }
10228                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
10229                *phys_ptr = address;
10230                *prot = 0;
10231                return true;
10232            }
10233        } else {
10234            /* For data accesses we always use the MMU bank indicated
10235             * by the current CPU state, but the security attributes
10236             * might downgrade a secure access to nonsecure.
10237             */
10238            if (sattrs.ns) {
10239                txattrs->secure = false;
10240            } else if (!secure) {
10241                /* NS access to S memory must fault.
10242                 * Architecturally we should first check whether the
10243                 * MPU information for this address indicates that we
10244                 * are doing an unaligned access to Device memory, which
10245                 * should generate a UsageFault instead. QEMU does not
10246                 * currently check for that kind of unaligned access though.
10247                 * If we added it we would need to do so as a special case
10248                 * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
10249                 */
10250                fi->type = ARMFault_QEMU_SFault;
10251                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
10252                *phys_ptr = address;
10253                *prot = 0;
10254                return true;
10255            }
10256        }
10257    }
10258
10259    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
10260                            txattrs, prot, &mpu_is_subpage, fi, NULL);
10261    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
10262    return ret;
10263}
10264
10265static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
10266                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
10267                                 hwaddr *phys_ptr, int *prot,
10268                                 ARMMMUFaultInfo *fi)
10269{
10270    int n;
10271    uint32_t mask;
10272    uint32_t base;
10273    bool is_user = regime_is_user(env, mmu_idx);
10274
10275    if (regime_translation_disabled(env, mmu_idx)) {
10276        /* MPU disabled.  */
10277        *phys_ptr = address;
10278        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
10279        return false;
10280    }
10281
10282    *phys_ptr = address;
10283    for (n = 7; n >= 0; n--) {
10284        base = env->cp15.c6_region[n];
10285        if ((base & 1) == 0) {
10286            continue;
10287        }
10288        mask = 1 << ((base >> 1) & 0x1f);
10289        /* Keep this shift separate from the above to avoid an
10290           (undefined) << 32.  */
10291        mask = (mask << 1) - 1;
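        /* The size field in bits [5:1] of the base register encodes the
         * region size as a power of two: for example a field value of 11
         * gives mask == 0xfff (a 4KB region) and 0x1f gives a mask covering
         * the entire 4GB address space.
         */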
10292        if (((base ^ address) & ~mask) == 0) {
10293            break;
10294        }
10295    }
10296    if (n < 0) {
10297        fi->type = ARMFault_Background;
10298        return true;
10299    }
10300
10301    if (access_type == MMU_INST_FETCH) {
10302        mask = env->cp15.pmsav5_insn_ap;
10303    } else {
10304        mask = env->cp15.pmsav5_data_ap;
10305    }
10306    mask = (mask >> (n * 4)) & 0xf;
10307    switch (mask) {
10308    case 0:
10309        fi->type = ARMFault_Permission;
10310        fi->level = 1;
10311        return true;
10312    case 1:
10313        if (is_user) {
10314            fi->type = ARMFault_Permission;
10315            fi->level = 1;
10316            return true;
10317        }
10318        *prot = PAGE_READ | PAGE_WRITE;
10319        break;
10320    case 2:
10321        *prot = PAGE_READ;
10322        if (!is_user) {
10323            *prot |= PAGE_WRITE;
10324        }
10325        break;
10326    case 3:
10327        *prot = PAGE_READ | PAGE_WRITE;
10328        break;
10329    case 5:
10330        if (is_user) {
10331            fi->type = ARMFault_Permission;
10332            fi->level = 1;
10333            return true;
10334        }
10335        *prot = PAGE_READ;
10336        break;
10337    case 6:
10338        *prot = PAGE_READ;
10339        break;
10340    default:
10341        /* Bad permission.  */
10342        fi->type = ARMFault_Permission;
10343        fi->level = 1;
10344        return true;
10345    }
10346    *prot |= PAGE_EXEC;
10347    return false;
10348}
10349
10350/* Combine either inner or outer cacheability attributes for normal
10351 * memory, according to table D4-42 and pseudocode procedure
10352 * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
10353 *
10354 * NB: only stage 1 includes allocation hints (RW bits), leading to
10355 * some asymmetry.
10356 */
10357static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
10358{
10359    if (s1 == 4 || s2 == 4) {
10360        /* non-cacheable has precedence */
10361        return 4;
10362    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
10363        /* stage 1 write-through takes precedence */
10364        return s1;
10365    } else if (extract32(s2, 2, 2) == 2) {
10366        /* stage 2 write-through takes precedence, but the allocation hint
10367         * is still taken from stage 1
10368         */
10369        return (2 << 2) | extract32(s1, 0, 2);
10370    } else { /* write-back */
10371        return s1;
10372    }
10373}
10374
10375/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
10376 * and CombineS1S2Desc()
10377 *
10378 * @s1:      Attributes from stage 1 walk
10379 * @s2:      Attributes from stage 2 walk
10380 */
10381static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
10382{
10383    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
10384    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
10385    ARMCacheAttrs ret;
10386
10387    /* Combine shareability attributes (table D4-43) */
10388    if (s1.shareability == 2 || s2.shareability == 2) {
10389        /* if either are outer-shareable, the result is outer-shareable */
10390        ret.shareability = 2;
10391    } else if (s1.shareability == 3 || s2.shareability == 3) {
10392        /* if either are inner-shareable, the result is inner-shareable */
10393        ret.shareability = 3;
10394    } else {
10395        /* both non-shareable */
10396        ret.shareability = 0;
10397    }
10398
10399    /* Combine memory type and cacheability attributes */
10400    if (s1hi == 0 || s2hi == 0) {
10401        /* Device has precedence over normal */
10402        if (s1lo == 0 || s2lo == 0) {
10403            /* nGnRnE has precedence over anything */
10404            ret.attrs = 0;
10405        } else if (s1lo == 4 || s2lo == 4) {
10406            /* non-Reordering has precedence over Reordering */
10407            ret.attrs = 4;  /* nGnRE */
10408        } else if (s1lo == 8 || s2lo == 8) {
10409            /* non-Gathering has precedence over Gathering */
10410            ret.attrs = 8;  /* nGRE */
10411        } else {
10412            ret.attrs = 0xc; /* GRE */
10413        }
10414
10415        /* Any location for which the resultant memory type is any
10416         * type of Device memory is always treated as Outer Shareable.
10417         */
10418        ret.shareability = 2;
10419    } else { /* Normal memory */
10420        /* Outer/inner cacheability combine independently */
10421        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
10422                  | combine_cacheattr_nibble(s1lo, s2lo);
10423
10424        if (ret.attrs == 0x44) {
10425            /* Any location for which the resultant memory type is Normal
10426             * Inner Non-cacheable, Outer Non-cacheable is always treated
10427             * as Outer Shareable.
10428             */
10429            ret.shareability = 2;
10430        }
10431    }
10432
10433    return ret;
10434}
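/*
 * Two illustrative combinations of the rules above: stage 1 Normal
 * Write-Back (attrs 0xff) combined with stage 2 Non-cacheable (0x44)
 * becomes 0x44 and is forced to Outer Shareable; stage 1 Device-nGnRE
 * (0x04) combined with any stage 2 Normal type remains Device-nGnRE,
 * again Outer Shareable.
 */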
10435
10436
10437/* get_phys_addr - get the physical address for this virtual address
10438 *
10439 * Find the physical address corresponding to the given virtual address,
10440 * by doing a translation table walk on MMU based systems or using the
10441 * MPU state on MPU based systems.
10442 *
10443 * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
10444 * prot and page_size may not be filled in, and the populated fault info (fi)
10445 * provides information on why the translation aborted, in the format of a
10446 * DFSR/IFSR fault register, with the following caveats:
10447 *  * we honour the short vs long DFSR format differences.
10448 *  * the WnR bit is never set (the caller must do this).
10449 *  * for PMSAv5 based systems we don't bother to return a full FSR format
10450 *    value.
10451 *
10452 * @env: CPUARMState
10453 * @address: virtual address to get physical address for
10454 * @access_type: 0 for read, 1 for write, 2 for execute
10455 * @mmu_idx: MMU index indicating required translation regime
10456 * @phys_ptr: set to the physical address corresponding to the virtual address
10457 * @attrs: set to the memory transaction attributes to use
10458 * @prot: set to the permissions for the page containing phys_ptr
10459 * @page_size: set to the size of the page containing phys_ptr
10460 * @fi: set to fault info if the translation fails
10461 * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
10462 */
10463bool get_phys_addr(CPUARMState *env, target_ulong address,
10464                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
10465                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
10466                   target_ulong *page_size,
10467                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
10468{
10469    if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
10470        /* Call ourselves recursively to do the stage 1 and then stage 2
10471         * translations.
10472         */
10473        if (arm_feature(env, ARM_FEATURE_EL2)) {
10474            hwaddr ipa;
10475            int s2_prot;
10476            int ret;
10477            ARMCacheAttrs cacheattrs2 = {};
10478
10479            ret = get_phys_addr(env, address, access_type,
10480                                stage_1_mmu_idx(mmu_idx), &ipa, attrs,
10481                                prot, page_size, fi, cacheattrs);
10482
10483            /* If S1 fails or S2 is disabled, return early.  */
10484            if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
10485                *phys_ptr = ipa;
10486                return ret;
10487            }
10488
10489            /* S1 is done. Now do S2 translation.  */
10490            ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
10491                                     phys_ptr, attrs, &s2_prot,
10492                                     page_size, fi,
10493                                     cacheattrs != NULL ? &cacheattrs2 : NULL);
10494            fi->s2addr = ipa;
10495            /* Combine the S1 and S2 perms.  */
10496            *prot &= s2_prot;
10497
10498            /* Combine the S1 and S2 cache attributes, if needed */
10499            if (!ret && cacheattrs != NULL) {
10500                if (env->cp15.hcr_el2 & HCR_DC) {
10501                    /*
10502                     * HCR.DC forces the first stage attributes to
10503                     *  Normal Non-Shareable,
10504                     *  Inner Write-Back Read-Allocate Write-Allocate,
10505                     *  Outer Write-Back Read-Allocate Write-Allocate.
10506                     */
10507                    cacheattrs->attrs = 0xff;
10508                    cacheattrs->shareability = 0;
10509                }
10510                *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
10511            }
10512
10513            return ret;
10514        } else {
10515            /*
10516             * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
10517             */
10518            mmu_idx = stage_1_mmu_idx(mmu_idx);
10519        }
10520    }
10521
10522    /* The page table entries may downgrade secure to non-secure, but
10523     * cannot upgrade a non-secure translation regime's attributes
10524     * to secure.
10525     */
10526    attrs->secure = regime_is_secure(env, mmu_idx);
10527    attrs->user = regime_is_user(env, mmu_idx);
10528
10529    /* Fast Context Switch Extension. This doesn't exist at all in v8.
10530     * In v7 and earlier it affects all stage 1 translations.
10531     */
10532    if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
10533        && !arm_feature(env, ARM_FEATURE_V8)) {
10534        if (regime_el(env, mmu_idx) == 3) {
10535            address += env->cp15.fcseidr_s;
10536        } else {
10537            address += env->cp15.fcseidr_ns;
10538        }
10539    }
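    /*
     * Illustrative example: with the FCSE PID field programmed to 3
     * (FCSEIDR == 0x06000000), a VA of 0x00001000 in the low 32MB is
     * remapped to the modified VA 0x06001000 by the addition above.
     */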
10540
10541    if (arm_feature(env, ARM_FEATURE_PMSA)) {
10542        bool ret;
10543        *page_size = TARGET_PAGE_SIZE;
10544
10545        if (arm_feature(env, ARM_FEATURE_V8)) {
10546            /* PMSAv8 */
10547            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
10548                                       phys_ptr, attrs, prot, page_size, fi);
10549        } else if (arm_feature(env, ARM_FEATURE_V7)) {
10550            /* PMSAv7 */
10551            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
10552                                       phys_ptr, prot, page_size, fi);
10553        } else {
10554            /* Pre-v7 MPU */
10555            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
10556                                       phys_ptr, prot, fi);
10557        }
10558        qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
10559                      " mmu_idx %u -> %s (prot %c%c%c)\n",
10560                      access_type == MMU_DATA_LOAD ? "reading" :
10561                      (access_type == MMU_DATA_STORE ? "writing" : "execute"),
10562                      (uint32_t)address, mmu_idx,
10563                      ret ? "Miss" : "Hit",
10564                      *prot & PAGE_READ ? 'r' : '-',
10565                      *prot & PAGE_WRITE ? 'w' : '-',
10566                      *prot & PAGE_EXEC ? 'x' : '-');
10567
10568        return ret;
10569    }
10570
10571    /* Definitely a real MMU, not an MPU */
10572
10573    if (regime_translation_disabled(env, mmu_idx)) {
10574        /* MMU disabled. */
10575        *phys_ptr = address;
10576        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
10577        *page_size = TARGET_PAGE_SIZE;
10578        return false;
10579    }
10580
10581    if (regime_using_lpae_format(env, mmu_idx)) {
10582        return get_phys_addr_lpae(env, address, access_type, mmu_idx,
10583                                  phys_ptr, attrs, prot, page_size,
10584                                  fi, cacheattrs);
10585    } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
10586        return get_phys_addr_v6(env, address, access_type, mmu_idx,
10587                                phys_ptr, attrs, prot, page_size, fi);
10588    } else {
10589        return get_phys_addr_v5(env, address, access_type, mmu_idx,
10590                                phys_ptr, prot, page_size, fi);
10591    }
10592}
10593
10594hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
10595                                         MemTxAttrs *attrs)
10596{
10597    ARMCPU *cpu = ARM_CPU(cs);
10598    CPUARMState *env = &cpu->env;
10599    hwaddr phys_addr;
10600    target_ulong page_size;
10601    int prot;
10602    bool ret;
10603    ARMMMUFaultInfo fi = {};
10604    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
10605
10606    *attrs = (MemTxAttrs) {};
10607
10608    ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
10609                        attrs, &prot, &page_size, &fi, NULL);
10610
10611    if (ret) {
10612        return -1;
10613    }
10614    return phys_addr;
10615}
10616
10617#endif
10618
10619/* Note that signed overflow is undefined in C.  The following routines are
10620   careful to use unsigned types where modulo arithmetic is required.
10621   Failure to do so _will_ break on newer gcc.  */
10622
10623/* Signed saturating arithmetic.  */
10624
10625/* Perform 16-bit signed saturating addition.  */
10626static inline uint16_t add16_sat(uint16_t a, uint16_t b)
10627{
10628    uint16_t res;
10629
10630    res = a + b;
10631    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
10632        if (a & 0x8000)
10633            res = 0x8000;
10634        else
10635            res = 0x7fff;
10636    }
10637    return res;
10638}
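/*
 * For example, 0x7000 + 0x2000 overflows the signed 16-bit range, giving
 * 0x9000 as a plain addition; the sign-flip check above detects this and
 * clamps the result to 0x7fff.  Similarly 0x8000 + 0x8000 clamps to 0x8000.
 */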
10639
10640/* Perform 8-bit signed saturating addition.  */
10641static inline uint8_t add8_sat(uint8_t a, uint8_t b)
10642{
10643    uint8_t res;
10644
10645    res = a + b;
10646    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
10647        if (a & 0x80)
10648            res = 0x80;
10649        else
10650            res = 0x7f;
10651    }
10652    return res;
10653}
10654
10655/* Perform 16-bit signed saturating subtraction.  */
10656static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
10657{
10658    uint16_t res;
10659
10660    res = a - b;
10661    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
10662        if (a & 0x8000)
10663            res = 0x8000;
10664        else
10665            res = 0x7fff;
10666    }
10667    return res;
10668}
10669
10670/* Perform 8-bit signed saturating subtraction.  */
10671static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
10672{
10673    uint8_t res;
10674
10675    res = a - b;
10676    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
10677        if (a & 0x80)
10678            res = 0x80;
10679        else
10680            res = 0x7f;
10681    }
10682    return res;
10683}
10684
10685#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
10686#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
10687#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
10688#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
10689#define PFX q
10690
10691#include "op_addsub.h"
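/*
 * Each inclusion of op_addsub.h expands these per-lane macros into one
 * family of parallel add/subtract helpers, with PFX glued into the helper
 * names.  This 'q'-prefixed instantiation presumably backs the signed
 * saturating parallel instructions (QADD16 and friends); the inclusions
 * below reuse the same template for the unsigned saturating, modulo and
 * halving variants.
 */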
10692
10693/* Unsigned saturating arithmetic.  */
10694static inline uint16_t add16_usat(uint16_t a, uint16_t b)
10695{
10696    uint16_t res;
10697    res = a + b;
10698    if (res < a)
10699        res = 0xffff;
10700    return res;
10701}
10702
10703static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
10704{
10705    if (a > b)
10706        return a - b;
10707    else
10708        return 0;
10709}
10710
10711static inline uint8_t add8_usat(uint8_t a, uint8_t b)
10712{
10713    uint8_t res;
10714    res = a + b;
10715    if (res < a)
10716        res = 0xff;
10717    return res;
10718}
10719
10720static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
10721{
10722    if (a > b)
10723        return a - b;
10724    else
10725        return 0;
10726}
10727
10728#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
10729#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
10730#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
10731#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
10732#define PFX uq
10733
10734#include "op_addsub.h"
10735
10736/* Signed modulo arithmetic.  */
10737#define SARITH16(a, b, n, op) do { \
10738    int32_t sum; \
10739    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
10740    RESULT(sum, n, 16); \
10741    if (sum >= 0) \
10742        ge |= 3 << (n * 2); \
10743    } while(0)
10744
10745#define SARITH8(a, b, n, op) do { \
10746    int32_t sum; \
10747    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
10748    RESULT(sum, n, 8); \
10749    if (sum >= 0) \
10750        ge |= 1 << n; \
10751    } while(0)
10752
10753
10754#define ADD16(a, b, n) SARITH16(a, b, n, +)
10755#define SUB16(a, b, n) SARITH16(a, b, n, -)
10756#define ADD8(a, b, n)  SARITH8(a, b, n, +)
10757#define SUB8(a, b, n)  SARITH8(a, b, n, -)
10758#define PFX s
10759#define ARITH_GE
10760
10761#include "op_addsub.h"
10762
10763/* Unsigned modulo arithmetic.  */
10764#define ADD16(a, b, n) do { \
10765    uint32_t sum; \
10766    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
10767    RESULT(sum, n, 16); \
10768    if ((sum >> 16) == 1) \
10769        ge |= 3 << (n * 2); \
10770    } while(0)
10771
10772#define ADD8(a, b, n) do { \
10773    uint32_t sum; \
10774    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
10775    RESULT(sum, n, 8); \
10776    if ((sum >> 8) == 1) \
10777        ge |= 1 << n; \
10778    } while(0)
10779
10780#define SUB16(a, b, n) do { \
10781    uint32_t sum; \
10782    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
10783    RESULT(sum, n, 16); \
10784    if ((sum >> 16) == 0) \
10785        ge |= 3 << (n * 2); \
10786    } while(0)
10787
10788#define SUB8(a, b, n) do { \
10789    uint32_t sum; \
10790    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
10791    RESULT(sum, n, 8); \
10792    if ((sum >> 8) == 0) \
10793        ge |= 1 << n; \
10794    } while(0)
10795
10796#define PFX u
10797#define ARITH_GE
10798
10799#include "op_addsub.h"
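/*
 * For the modulo variants ARITH_GE is defined, so the macros above also
 * compute the CPSR.GE bits: a halfword lane sets two GE bits and a byte
 * lane sets one, when the signed result is non-negative (signed forms) or
 * when the unsigned addition carries / subtraction does not borrow
 * (unsigned forms).  The SEL helper further down consumes these GE bits.
 */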
10800
10801/* Halved signed arithmetic.  */
10802#define ADD16(a, b, n) \
10803  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
10804#define SUB16(a, b, n) \
10805  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
10806#define ADD8(a, b, n) \
10807  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
10808#define SUB8(a, b, n) \
10809  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
10810#define PFX sh
10811
10812#include "op_addsub.h"
10813
10814/* Halved unsigned arithmetic.  */
10815#define ADD16(a, b, n) \
10816  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
10817#define SUB16(a, b, n) \
10818  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
10819#define ADD8(a, b, n) \
10820  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
10821#define SUB8(a, b, n) \
10822  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
10823#define PFX uh
10824
10825#include "op_addsub.h"
10826
10827static inline uint8_t do_usad(uint8_t a, uint8_t b)
10828{
10829    if (a > b)
10830        return a - b;
10831    else
10832        return b - a;
10833}
10834
10835/* Unsigned sum of absolute byte differences.  */
10836uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
10837{
10838    uint32_t sum;
10839    sum = do_usad(a, b);
10840    sum += do_usad(a >> 8, b >> 8);
10841    sum += do_usad(a >> 16, b >> 16);
10842    sum += do_usad(a >> 24, b >> 24);
10843    return sum;
10844}
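/*
 * Example: usad8(0x01020304, 0x04030201) sums |4-1| + |3-2| + |2-3| + |1-4|
 * across the four byte lanes and returns 8.
 */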
10845
10846/* For ARMv6 SEL instruction.  */
10847uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
10848{
10849    uint32_t mask;
10850
10851    mask = 0;
10852    if (flags & 1)
10853        mask |= 0xff;
10854    if (flags & 2)
10855        mask |= 0xff00;
10856    if (flags & 4)
10857        mask |= 0xff0000;
10858    if (flags & 8)
10859        mask |= 0xff000000;
10860    return (a & mask) | (b & ~mask);
10861}
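/*
 * For example, with flags == 0b0101 the mask is 0x00ff00ff, so bytes 0 and 2
 * of the result are taken from a and bytes 1 and 3 from b.
 */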
10862
10863/* CRC helpers.
10864 * The upper bytes of val (above the number specified by 'bytes') must have
10865 * been zeroed out by the caller.
10866 */
10867uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
10868{
10869    uint8_t buf[4];
10870
10871    stl_le_p(buf, val);
10872
10873    /* zlib crc32 converts the accumulator and output to one's complement.  */
10874    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
10875}
10876
10877uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
10878{
10879    uint8_t buf[4];
10880
10881    stl_le_p(buf, val);
10882
10883    /* Linux crc32c converts the output to one's complement.  */
10884    return crc32c(acc, buf, bytes) ^ 0xffffffff;
10885}
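/*
 * These helpers are shared by the byte, halfword and word forms of the
 * CRC32 and CRC32C instructions; the translator is expected to pass
 * bytes == 1, 2 or 4 accordingly, with val already zero-extended as the
 * comment above requires.
 */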
10886
10887/* Return the exception level to which FP-disabled exceptions should
10888 * be taken, or 0 if FP is enabled.
10889 */
10890int fp_exception_el(CPUARMState *env, int cur_el)
10891{
10892#ifndef CONFIG_USER_ONLY
10893    int fpen;
10894
10895    /* CPACR and the CPTR registers don't exist before v6, so FP is
10896     * always accessible
10897     */
10898    if (!arm_feature(env, ARM_FEATURE_V6)) {
10899        return 0;
10900    }
10901
10902    if (arm_feature(env, ARM_FEATURE_M)) {
10903        /* CPACR can cause a NOCP UsageFault taken to current security state */
10904        if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) {
10905            return 1;
10906        }
10907
10908        if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) {
10909            if (!extract32(env->v7m.nsacr, 10, 1)) {
10910                /* FP insns cause a NOCP UsageFault taken to Secure */
10911                return 3;
10912            }
10913        }
10914
10915        return 0;
10916    }
10917
10918    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
10919     * 0, 2 : trap EL0 and EL1/PL1 accesses
10920     * 1    : trap only EL0 accesses
10921     * 3    : trap no accesses
10922     */
10923    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
10924    switch (fpen) {
10925    case 0:
10926    case 2:
10927        if (cur_el == 0 || cur_el == 1) {
10928            /* Trap to PL1, which might be EL1 or EL3 */
10929            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
10930                return 3;
10931            }
10932            return 1;
10933        }
10934        if (cur_el == 3 && !is_a64(env)) {
10935            /* Secure PL1 running at EL3 */
10936            return 3;
10937        }
10938        break;
10939    case 1:
10940        if (cur_el == 0) {
10941            return 1;
10942        }
10943        break;
10944    case 3:
10945        break;
10946    }
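    /*
     * For example, an EL0 access with CPACR.FPEN == 1 returns 1 above
     * (trap to EL1), while FPEN == 3 never traps via CPACR and we fall
     * through to the NSACR and CPTR checks below.
     */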
10947
10948    /*
10949     * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode
10950     * to control non-secure access to the FPU. It doesn't have any
10951     * effect if EL3 is AArch64 or if EL3 doesn't exist at all.
10952     */
10953    if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
10954         cur_el <= 2 && !arm_is_secure_below_el3(env))) {
10955        if (!extract32(env->cp15.nsacr, 10, 1)) {
10956            /* FP insns act as UNDEF */
10957            return cur_el == 2 ? 2 : 1;
10958        }
10959    }
10960
10961    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
10962     * check because zero bits in the registers mean "don't trap".
10963     */
10964
10965    /* CPTR_EL2 : present in v7VE or v8 */
10966    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
10967        && !arm_is_secure_below_el3(env)) {
10968        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
10969        return 2;
10970    }
10971
10972    /* CPTR_EL3 : present in v8 */
10973    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
10974        /* Trap all FP ops to EL3 */
10975        return 3;
10976    }
10977#endif
10978    return 0;
10979}
10980
10981#ifndef CONFIG_TCG
10982ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
10983{
10984    g_assert_not_reached();
10985}
10986#endif
10987
10988ARMMMUIdx arm_mmu_idx(CPUARMState *env)
10989{
10990    int el;
10991
10992    if (arm_feature(env, ARM_FEATURE_M)) {
10993        return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure);
10994    }
10995
10996    el = arm_current_el(env);
10997    if (el < 2 && arm_is_secure_below_el3(env)) {
10998        return ARMMMUIdx_S1SE0 + el;
10999    } else {
11000        return ARMMMUIdx_S12NSE0 + el;
11001    }
11002}
11003
11004int cpu_mmu_index(CPUARMState *env, bool ifetch)
11005{
11006    return arm_to_core_mmu_idx(arm_mmu_idx(env));
11007}
11008
11009#ifndef CONFIG_USER_ONLY
11010ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
11011{
11012    return stage_1_mmu_idx(arm_mmu_idx(env));
11013}
11014#endif
11015
11016void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
11017                          target_ulong *cs_base, uint32_t *pflags)
11018{
11019    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
11020    int current_el = arm_current_el(env);
11021    int fp_el = fp_exception_el(env, current_el);
11022    uint32_t flags = 0;
11023
11024    if (is_a64(env)) {
11025        ARMCPU *cpu = env_archcpu(env);
11026        uint64_t sctlr;
11027
11028        *pc = env->pc;
11029        flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
11030
11031        /* Get control bits for tagged addresses.  */
11032        {
11033            ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
11034            ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
11035            int tbii, tbid;
11036
11037            /* FIXME: ARMv8.1-VHE S2 translation regime.  */
11038            if (regime_el(env, stage1) < 2) {
11039                ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
11040                tbid = (p1.tbi << 1) | p0.tbi;
11041                tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
11042            } else {
11043                tbid = p0.tbi;
11044                tbii = tbid & !p0.tbid;
11045            }
11046
11047            flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
11048            flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
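            /*
             * Here bit 0 of tbid/tbii corresponds to the low half of the
             * address space (the va == 0 query above) and bit 1 to the high
             * half (the va == -1 query); tbii keeps only the TBI bits that
             * also apply to instruction fetches, i.e. those without TBID set.
             */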
11049        }
11050
11051        if (cpu_isar_feature(aa64_sve, cpu)) {
11052            int sve_el = sve_exception_el(env, current_el);
11053            uint32_t zcr_len;
11054
11055            /* If SVE is disabled, but FP is enabled,
11056             * then the effective len is 0.
11057             */
11058            if (sve_el != 0 && fp_el == 0) {
11059                zcr_len = 0;
11060            } else {
11061                zcr_len = sve_zcr_len_for_el(env, current_el);
11062            }
11063            flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
11064            flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
11065        }
11066
11067        sctlr = arm_sctlr(env, current_el);
11068
11069        if (cpu_isar_feature(aa64_pauth, cpu)) {
11070            /*
11071             * In order to save space in flags, we record only whether
11072             * pauth is "inactive", meaning all insns are implemented as
11073             * a nop, or "active" when some action must be performed.
11074             * The decision of which action to take is left to a helper.
11075             */
11076            if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
11077                flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
11078            }
11079        }
11080
11081        if (cpu_isar_feature(aa64_bti, cpu)) {
11082            /* Note that SCTLR_EL[23].BT == SCTLR_BT1.  */
11083            if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
11084                flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
11085            }
11086            flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
11087        }
11088    } else {
11089        *pc = env->regs[15];
11090        flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
11091        flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
11092        flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
11093        flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
11094        flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
11095        flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
11096        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
11097            || arm_el_is_aa64(env, 1) || arm_feature(env, ARM_FEATURE_M)) {
11098            flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
11099        }
11100        /* Note that XSCALE_CPAR shares bits with VECSTRIDE */
11101        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
11102            flags = FIELD_DP32(flags, TBFLAG_A32,
11103                               XSCALE_CPAR, env->cp15.c15_cpar);
11104        }
11105    }
11106
11107    flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
11108
11109    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
11110     * states defined in the ARM ARM for software singlestep:
11111     *  SS_ACTIVE   PSTATE.SS   State
11112     *     0            x       Inactive (the TB flag for SS is always 0)
11113     *     1            0       Active-pending
11114     *     1            1       Active-not-pending
11115     */
11116    if (arm_singlestep_active(env)) {
11117        flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
11118        if (is_a64(env)) {
11119            if (env->pstate & PSTATE_SS) {
11120                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
11121            }
11122        } else {
11123            if (env->uncached_cpsr & PSTATE_SS) {
11124                flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
11125            }
11126        }
11127    }
11128    if (arm_cpu_data_is_big_endian(env)) {
11129        flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
11130    }
11131    flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
11132
11133    if (arm_v7m_is_handler_mode(env)) {
11134        flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
11135    }
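         /*
          * Note (illustrative): HANDLER is set while an M-profile core is in
          * Handler mode (processing an exception); the magic EXC_RETURN
          * addresses, for example, are only special in Handler mode.
          */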
11136
11137    /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is set
11138     * and the requested execution priority is negative, which suppresses them.
11139     */
11140    if (arm_feature(env, ARM_FEATURE_V8) &&
11141        arm_feature(env, ARM_FEATURE_M) &&
11142        !((mmu_idx  & ARM_MMU_IDX_M_NEGPRI) &&
11143          (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
11144        flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
11145    }
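         /*
          * Note (illustrative): the ARM_MMU_IDX_M_NEGPRI bit in the M-profile
          * mmu_idx encoding indicates that the current execution priority is
          * negative (e.g. HardFault, NMI, or FAULTMASK set), which is the
          * only case CCR.STKOFHFNMIGN covers.
          */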
11146
11147    if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
11148        FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) != env->v7m.secure) {
11149        flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
11150    }
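         /*
          * Note (illustrative): FPCCR_S_WRONG tells the translator that
          * FPCCR.S no longer matches the current security state, so the
          * generated code must, broadly, bring FPCCR up to date before an
          * FP instruction is allowed to complete.
          */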
11151
11152    if (arm_feature(env, ARM_FEATURE_M) &&
11153        (env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
11154        (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
11155         (env->v7m.secure &&
11156          !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
11157        /*
11158         * ASPEN is set, but FPCA/SFPA indicate that there is no active
11159         * FP context; we must create a new FP context before executing
11160         * any FP insn.
11161         */
11162        flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
11163    }
11164
11165    if (arm_feature(env, ARM_FEATURE_M)) {
11166        bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
11167
11168        if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
11169            flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
11170        }
11171    }
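         /*
          * Note (illustrative): LSPACT set means a lazy floating-point state
          * preservation is still pending for the security domain selected by
          * FPCCR.S; it has to be resolved before any FP instruction can be
          * allowed to execute.
          */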
11172
11173    *pflags = flags;
11174    *cs_base = 0;
11175}
11176
11177#ifdef TARGET_AARCH64
11178/*
11179 * The manual says that when SVE is enabled and VQ is widened the
11180 * implementation is allowed to zero the previously inaccessible
11181 * portion of the registers.  The corollary to that is that when
11182 * SVE is enabled and VQ is narrowed we are also allowed to zero
11183 * the now inaccessible portion of the registers.
11184 *
11185 * The intent of this is that no predicate bit beyond VQ is ever set.
11186 * This means that some operations on the predicate registers themselves
11187 * may operate on a full uint64_t, or even unrolled across the maximum
11188 * uint64_t[4].  Performing that host arithmetic on whole words,
11189 * unconditionally, may well be cheaper than conditionals restricting
11190 * the operation to the relevant portion of a uint16_t[16].
11191 */
11192void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq)
11193{
11194    int i, j;
11195    uint64_t pmask;
11196
11197    assert(vq >= 1 && vq <= ARM_MAX_VQ);
11198    assert(vq <= env_archcpu(env)->sve_max_vq);
11199
11200    /* Zap the high bits of the zregs.  */
11201    for (i = 0; i < 32; i++) {
11202        memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq));
11203    }
11204
11205    /* Zap the high bits of the pregs and ffr.  */
11206    pmask = 0;
11207    if (vq & 3) {
11208        pmask = ~(-1ULL << (16 * (vq & 3)));
11209    }
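         /*
          * Worked example: each 128-bit vector quantum contributes 16
          * predicate bits, so one uint64_t of pregs[].p[] covers 4 quanta.
          * For vq == 5 the loop below starts at j == 1 with pmask == 0xffff:
          * p[0] (quanta 1-4) is untouched, p[1] keeps only its low 16 bits
          * (quantum 5), and any later words are cleared.  The inner loop
          * runs to 17 because FFR is stored after the 16 predicate registers.
          */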
11210    for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) {
11211        for (i = 0; i < 17; ++i) {
11212            env->vfp.pregs[i].p[j] &= pmask;
11213        }
11214        pmask = 0;
11215    }
11216}
11217
11218/*
11219 * Notice a change in SVE vector size when changing EL.
11220 */
11221void aarch64_sve_change_el(CPUARMState *env, int old_el,
11222                           int new_el, bool el0_a64)
11223{
11224    ARMCPU *cpu = env_archcpu(env);
11225    int old_len, new_len;
11226    bool old_a64, new_a64;
11227
11228    /* Nothing to do if no SVE.  */
11229    if (!cpu_isar_feature(aa64_sve, cpu)) {
11230        return;
11231    }
11232
11233    /* Nothing to do if FP is disabled in either EL.  */
11234    if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) {
11235        return;
11236    }
11237
11238    /*
11239     * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped
11240     * at ELx, or not available because the EL is in AArch32 state, then
11241     * for all purposes other than a direct read, the ZCR_ELx.LEN field
11242     * has an effective value of 0".
11243     *
11244     * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0).
11245     * If we ignore aa32 state, we would fail to see the vq4->vq0 transition
11246     * from EL2->EL1.  Thus we go ahead and narrow when entering aa32 so that
11247     * we already have the correct register contents when encountering the
11248     * vq0->vq0 transition between EL0->EL1.
11249     */
11250    old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64;
11251    old_len = (old_a64 && !sve_exception_el(env, old_el)
11252               ? sve_zcr_len_for_el(env, old_el) : 0);
11253    new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64;
11254    new_len = (new_a64 && !sve_exception_el(env, new_el)
11255               ? sve_zcr_len_for_el(env, new_el) : 0);
11256
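         /*
          * Note: old_len and new_len are in ZCR_ELx.LEN form, i.e. one less
          * than the number of 128-bit quanta, with 0 meaning a single
          * 128-bit vector; hence the "+ 1" when converting back to a vq
          * count for the narrow below.
          */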
11257    /* When changing vector length, clear inaccessible state.  */
11258    if (new_len < old_len) {
11259        aarch64_sve_narrow_vq(env, new_len + 1);
11260    }
11261}
11262#endif
11263